pax_global_header 0000666 0000000 0000000 00000000064 15131732575 0014523 g ustar 00root root 0000000 0000000 52 comment=4fef6d9d4687fa8c1df49d92d3e5ef44fe9711ac
cinder-27.0.0+git20260115.159.4fef6d9d4/ 0000775 0000000 0000000 00000000000 15131732575 0016370 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/.coveragerc 0000664 0000000 0000000 00000000131 15131732575 0020504 0 ustar 00root root 0000000 0000000 [run]
branch = True
source = cinder
omit = cinder/tests/*
[report]
ignore_errors = True
cinder-27.0.0+git20260115.159.4fef6d9d4/.gitignore 0000664 0000000 0000000 00000001350 15131732575 0020357 0 ustar 00root root 0000000 0000000 *.DS_Store
*.log
*.mo
*.pyc
*.sqlite
/.*
!.coveragerc
!.gitignore
!.mailmap
!.testr.conf
!.stestr.conf
!.zuul.yaml
.*.sw?
AUTHORS
Authors
build/*
build-stamp
CA/
ChangeLog
cinder.egg-info
cover/*
covhtml
dist/*
etc/cinder/cinder.conf.sample
etc/cinder/policy.yaml.sample
instances
keeper
keys
local_settings.py
mypy-report
tools/lintstack.head.py
tools/pylint_exceptions
tags
# Files created by Sphinx build
doc/build
doc/source/_static/cinder.conf.sample
doc/source/_static/cinder.policy.yaml.sample
doc/source/drivers.rst
# Files created for API reference
api-ref/build
# Files created by releasenotes build
releasenotes/build
contrib/block-box/.db_data
RELEASENOTES.rst
releasenotes/notes/reno.cache
# Files created by alembic
cinder.db
cinder-27.0.0+git20260115.159.4fef6d9d4/.gitreview 0000664 0000000 0000000 00000000111 15131732575 0020367 0 ustar 00root root 0000000 0000000 [gerrit]
host=review.opendev.org
port=29418
project=openstack/cinder.git
cinder-27.0.0+git20260115.159.4fef6d9d4/.pre-commit-config.yaml 0000664 0000000 0000000 00000001603 15131732575 0022651 0 ustar 00root root 0000000 0000000 ---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: trailing-whitespace
- id: mixed-line-ending
args: ['--fix', 'lf']
exclude: '.*\.(svg)$'
- id: fix-byte-order-marker
- id: check-executables-have-shebangs
- id: check-merge-conflict
- id: debug-statements
- id: check-json
files: .*\.json$
- id: check-yaml
files: .*\.(yaml|yml)$
exclude: 'rally-jobs/cinder.yaml'
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.5
hooks:
- id: remove-tabs
exclude: '.*\.(svg)$'
- repo: https://opendev.org/openstack/hacking
rev: 7.0.0
hooks:
- id: hacking
additional_dependencies: []
exclude: '^(doc|releasenotes|tools)/.*$'
- repo: https://github.com/PyCQA/doc8
rev: v2.0.0
hooks:
- id: doc8
cinder-27.0.0+git20260115.159.4fef6d9d4/.pylintrc 0000664 0000000 0000000 00000015235 15131732575 0020243 0 ustar 00root root 0000000 0000000 [MASTER]
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS,tests,test
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Specify a configuration file.
#rcfile=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=
# "F" Fatal errors that prevent further processing
import-error,
# "I" Informational noise
locally-disabled,
c-extension-no-member,
# "E" Error for important programming issues (likely bugs)
access-member-before-definition,
bad-super-call,
no-member,
no-method-argument,
no-name-in-module,
no-self-argument,
no-value-for-parameter,
unsubscriptable-object,
method-hidden,
not-callable,
keyword-arg-before-vararg,
too-many-function-args,
unsupported-assignment-operation,
not-an-iterable,
unsupported-membership-test,
unsupported-assignment-operation,
raising-bad-type,
bad-option-value,
unexpected-keyword-arg,
assignment-from-none,
assignment-from-no-return,
# "W" Warnings for stylistic problems or minor programming issues
exec-used,
pointless-statement,
unnecessary-lambda,
abstract-method,
arguments-differ,
attribute-defined-outside-init,
bad-builtin,
bad-indentation,
broad-except,
deprecated-lambda,
expression-not-assigned,
fixme,
global-statement,
global-variable-not-assigned,
no-init,
non-parent-init-called,
protected-access,
redefined-builtin,
redefined-outer-name,
reimported,
signature-differs,
star-args,
super-init-not-called,
unpacking-non-sequence,
unused-argument,
unused-import,
undefined-loop-variable,
bad-staticmethod-argument,
deprecated-method,
useless-else-on-loop,
lost-exception,
pointless-string-statement,
useless-super-delegation,
deprecated-method,
dangerous-default-value,
wildcard-import,
bad-staticmethod-argument,
eval-used,
blacklisted-name,
pointless-statement,
try-except-raise,
# "C" Coding convention violations
bad-continuation,
invalid-name,
missing-docstring,
old-style-class,
superfluous-parens,
wrong-import-position,
wrong-import-order,
ungrouped-imports,
unused-variable,
len-as-condition,
cell-var-from-loop,
singleton-comparison,
misplaced-comparison-constant,
unidiomatic-typecheck,
consider-using-enumerate,
bad-whitespace,
line-too-long,
useless-super-delegation,
pointless-string-statement,
unsupported-membership-test,
bad-classmethod-argument,
bad-mcs-classmethod-argument,
# "R" Refactor recommendations
abstract-class-little-used,
abstract-class-not-used,
duplicate-code,
interface-not-implemented,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements,
too-many-nested-blocks,
no-else-return,
inconsistent-return-statements,
simplifiable-if-statement,
too-many-boolean-expressions,
cyclic-import,
redefined-argument-from-local,
consider-using-ternary,
literal-comparison,
too-many-boolean-expressions,
useless-object-inheritance,
trailing-comma-tuple,
useless-object-inheritance,
consider-using-set-comprehension,
consider-using-in,
useless-return,
chained-comparison
[REPORTS]
# Tells whether to display a full report or only the messages.
reports=no
[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
mixin-class-rgx=(^(ManageResource)$|.*[Mm]ixin)
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=_
[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems,alembic.context,alembic.op,
alembic.config,pyxcli,storpool,oslo_privsep.capabilities,nvmet
signature-mutators=unittest.mock.patch,unittest.mock.patch.object,sqlalchemy.util._preloaded.dependencies
# This is for cinder.objects.*, and requests.packages.*, but due to
# https://github.com/PyCQA/pylint/issues/2498
# it doesn't seem that generated-members can be specified correctly.
# Clean this up later when pylint works correctly.
generated-members=objects,requests
cinder-27.0.0+git20260115.159.4fef6d9d4/.stestr.conf 0000664 0000000 0000000 00000000104 15131732575 0020634 0 ustar 00root root 0000000 0000000 [DEFAULT]
test_path=${OS_TEST_PATH:-./cinder/tests/unit}
top_dir=./
cinder-27.0.0+git20260115.159.4fef6d9d4/.zuul.yaml 0000664 0000000 0000000 00000031225 15131732575 0020334 0 ustar 00root root 0000000 0000000 - project:
templates:
- openstack-python3-jobs
- openstack-python3-jobs-arm64
- publish-openstack-docs-pti
- periodic-stable-jobs
- check-requirements
- integrated-gate-storage
- release-notes-jobs-python3
check:
jobs:
- cinder-code-coverage:
voting: false
- cinder-mypy
- cinder-tox-bandit-baseline:
voting: false
- openstack-tox-functional-py310:
irrelevant-files: &functional-irrelevant-files
- ^.*\.rst$
- ^cinder/locale/.*$
- ^cinder/tests/hacking/.*$
- ^cinder/tests/unit.*$
- ^doc/.*$
- ^releasenotes/.*$
- ^reno.yaml$
- openstack-tox-functional-py313:
irrelevant-files: *functional-irrelevant-files
- cinder-rally-task:
voting: false
irrelevant-files: *functional-irrelevant-files
- openstack-tox-pylint:
voting: false
timeout: 5400
irrelevant-files:
- ^.*\.rst$
- ^api-ref/.*$
- ^cinder/locale/.*$
- ^cinder/tests/hacking/.*$
- ^cinder/tests/unit.*$
- ^doc/.*$
- ^releasenotes/.*$
- ^reno.yaml$
- cinder-plugin-ceph-tempest:
irrelevant-files: &gate-irrelevant-files
- ^(test-|)requirements.txt$
- ^.*\.rst$
- ^api-ref/.*$
- ^cinder/cmd/status\.py$
- ^cinder/locale/.*$
- ^cinder/tests/functional.*$
- ^cinder/tests/hacking/.*$
- ^cinder/tests/unit.*$
- ^doc/.*$
- ^releasenotes/.*$
- ^reno.yaml$
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
- cinder-plugin-ceph-tempest-mn-aa:
voting: false
irrelevant-files: *gate-irrelevant-files
- cinder-tempest-plugin-lvm-lio-barbican:
# NOTE: we use this as a canary job to make sure at least
# one expensive tempest job is run on changes excluded by
# the gate-irrelevant-files defined above
irrelevant-files:
- ^.*\.rst$
- ^api-ref/.*$
- ^cinder/cmd/status\.py$
- ^cinder/locale/.*$
- ^cinder/tests/functional.*$
- ^cinder/tests/hacking/.*$
- ^cinder/tests/unit.*$
- ^doc/.*$
- ^releasenotes/.*$
- ^reno.yaml$
- ^tools/.*$
- cinder-tempest-plugin-lvm-lio-barbican-fips-py311:
voting: false
irrelevant-files: *gate-irrelevant-files
- cinder-tempest-plugin-protection-functional:
irrelevant-files: *gate-irrelevant-files
- cinder-grenade-mn-sub-volbak:
irrelevant-files: *gate-irrelevant-files
- cinder-tempest-lvm-multibackend:
voting: false
irrelevant-files: *gate-irrelevant-files
- cinder-for-glance-optimized:
voting: false
irrelevant-files: *gate-irrelevant-files
- devstack-plugin-nfs-tempest-full:
irrelevant-files: *gate-irrelevant-files
- devstack-plugin-nfs-tempest-full-fips-py311:
voting: false
irrelevant-files: *gate-irrelevant-files
- tempest-slow-py3:
irrelevant-files: *gate-irrelevant-files
- tempest-integrated-storage:
irrelevant-files: *gate-irrelevant-files
- grenade:
irrelevant-files: *gate-irrelevant-files
- grenade-skip-level:
irrelevant-files: *gate-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *gate-irrelevant-files
- openstacksdk-functional-devstack:
irrelevant-files: *gate-irrelevant-files
gate:
jobs:
- cinder-grenade-mn-sub-volbak:
irrelevant-files: *gate-irrelevant-files
- cinder-plugin-ceph-tempest:
irrelevant-files: *gate-irrelevant-files
- tempest-integrated-storage:
irrelevant-files: *gate-irrelevant-files
- grenade:
irrelevant-files: *gate-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *gate-irrelevant-files
- openstacksdk-functional-devstack:
irrelevant-files: *gate-irrelevant-files
experimental:
jobs:
- cinder-multibackend-matrix-migration:
irrelevant-files: *gate-irrelevant-files
- cinder-grenade-mn-sub-volschbak:
irrelevant-files: *gate-irrelevant-files
- cinder-grenade-mn-sub-bak:
irrelevant-files: *gate-irrelevant-files
- devstack-plugin-ceph-tempest-py3:
irrelevant-files: *gate-irrelevant-files
- tempest-pg-full:
irrelevant-files: *gate-irrelevant-files
- job:
# Security testing for known issues
name: cinder-tox-bandit-baseline
parent: openstack-tox
timeout: 2400
vars:
tox_envlist: bandit-baseline
required-projects:
- openstack/requirements
irrelevant-files: *gate-irrelevant-files
- job:
name: cinder-code-coverage
parent: openstack-tox-cover
timeout: 2400
irrelevant-files:
- ^(test-|)requirements.txt$
- ^.*\.rst$
- ^api-ref/.*$
- ^cinder/cmd/status\.py$
- ^cinder/locale/.*$
- ^doc/.*$
- ^releasenotes/.*$
- ^reno.yaml$
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
- job:
name: cinder-rally-task
parent: rally-task-cinder
timeout: 7800
vars:
devstack_localrc:
OSPROFILER_COLLECTOR: redis
devstack_plugins:
osprofiler: https://opendev.org/openstack/osprofiler
rally-openstack: https://opendev.org/openstack/rally-openstack
rally_task: rally-jobs/cinder.yaml
required-projects:
- openstack/rally-openstack
- openstack/osprofiler
- job:
name: cinder-plugin-ceph-tempest
parent: devstack-plugin-ceph-tempest-py3
roles:
- zuul: opendev.org/openstack/cinder-tempest-plugin
vars:
# FIXME: change I29b1af0a4034decad to tempest added image format tests that
# cannot pass in this job because the image data takes a optimized path that
# bypasses nova's checks. Until the nova team decides on a strategy to handle
# this issue, we skip these tests.
tempest_exclude_regex: (tempest.api.image.v2.test_images_formats.ImagesFormatTest.test_compute_rejects)
devstack_localrc:
CEPH_MIN_CLIENT_VERSION: "mimic"
# NOTE: if jobs are having memory problems, may want
# to turn this on (currently defaults to false):
# MYSQL_REDUCE_MEMORY: true
devstack_local_conf:
post-config:
$GLANCE_API_CONF:
DEFAULT:
do_secure_hash: False
test-config:
$TEMPEST_CONFIG:
volume-feature-enabled:
volume_revert: True
timeout: 10800
- job:
# this depends on some ceph admin setup which is not yet complete
# TODO(alee) enable this test when ceph admin work is complete.
name: cinder-plugin-ceph-tempest-fips
parent: cinder-plugin-ceph-tempest
nodeset: devstack-single-node-centos-9-stream
pre-run: playbooks/enable-fips.yaml
vars:
configure_swap_size: 4096
nslookup_target: 'opendev.org'
- job:
name: cinder-plugin-ceph-tempest-mn-aa
parent: devstack-plugin-ceph-multinode-tempest-py3
roles:
- zuul: opendev.org/openstack/cinder-tempest-plugin
vars:
configure_swap_size: 4096
devstack_localrc:
TEMPEST_VOLUME_REVERT_TO_SNAPSHOT: True
# NOTE: if jobs are having memory problems, may want
# to turn this on (currently defaults to false):
# MYSQL_REDUCE_MEMORY: true
devstack_local_conf:
post-config:
$CINDER_CONF:
DEFAULT:
cluster: ceph
- job:
name: cinder-grenade-mn-sub-bak
parent: grenade-multinode
description: |
Cinder grenade multinode job where cinder-backup only runs
on the subnode.
It tests the new c-api, c-sch, c-vol (on the controller node)
with the old c-bak (on the subnode).
Former names for this job were:
* cinder-grenade-dsvm-mn-sub-bak
* legacy-grenade-dsvm-cinder-mn-sub-bak
required-projects:
- opendev.org/openstack/grenade
- opendev.org/openstack/cinder
vars:
devstack_services:
c-bak: false
c-vol: true
group-vars:
subnode:
devstack_services:
c-bak: true
c-vol: false
- job:
name: cinder-grenade-mn-sub-volbak
parent: grenade-multinode
description: |
Cinder grenade multinode job where cinder-backup and cinder-volume
only run on the subnode.
It tests the new c-api, c-sch (on the controller node)
with the old c-bak, c-vol (on the subnode).
Former names for this job were:
* cinder-grenade-dsvm-mn-sub-volbak
* legacy-grenade-dsvm-cinder-mn-sub-volbak
required-projects:
- opendev.org/openstack/grenade
- opendev.org/openstack/cinder
vars:
devstack_services:
c-bak: false
c-vol: false
group-vars:
subnode:
devstack_services:
c-bak: true
c-vol: true
- job:
name: cinder-grenade-mn-sub-volschbak
parent: grenade-multinode
description: |
Cinder grenade multinode job where cinder-backup, cinder-volume
and cinder-scheduler only run on the subnode.
It tests the new c-api (on the controller node)
with the old c-bak, c-sch, c-vol (on the subnode).
Former names for this job were:
* cinder-grenade-dsvm-mn-sub-volschbak
* legacy-grenade-dsvm-cinder-mn-sub-volschbak
required-projects:
- opendev.org/openstack/grenade
- opendev.org/openstack/cinder
vars:
devstack_services:
c-bak: false
c-sch: false
c-vol: false
group-vars:
subnode:
devstack_services:
c-bak: true
c-sch: true
c-vol: true
- job:
name: cinder-tempest-lvm-multibackend
parent: devstack-tempest
description: |
Cinder tempest job based on LVM and multiple backends.
Former names for this job were:
* legacy-tempest-dsvm-lvm-multibackend
timeout: 10800
required-projects:
- opendev.org/openstack/cinder-tempest-plugin
vars:
tox_envlist: all
tempest_test_regex: '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)|(^cinder_tempest_plugin))'
tempest_plugins:
- cinder-tempest-plugin
devstack_localrc:
CINDER_ENABLED_BACKENDS: 'lvm:lvmdriver-1,lvm:lvmdriver-2'
CINDER_VOLUME_CLEAR: none
irrelevant-files: *gate-irrelevant-files
- job:
name: cinder-mypy
parent: openstack-tox
vars:
tox_envlist: mypy
tox_inline_comments: false
- job:
name: cinder-for-glance-optimized
parent: cinder-tempest-plugin-basic
description: |
Configures glance with cinder as a backend for multiple glance cinder
stores and with cinder configured to use the optimized workflow of
moving image data directly in the backend.
vars:
devstack_localrc:
USE_CINDER_FOR_GLANCE: True
GLANCE_ENABLE_MULTIPLE_STORES: True
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1
GLANCE_CINDER_DEFAULT_BACKEND: lvmdriver-1
GLANCE_SHOW_DIRECT_URL: True
GLANCE_SHOW_MULTIPLE_LOCATIONS: True
CINDER_ALLOWED_DIRECT_URL_SCHEMES: cinder
CINDER_UPLOAD_OPTIMIZED: True
CINDER_UPLOAD_INTERNAL_TENANT: True
CINDER_USE_SERVICE_TOKEN: True
tempest_test_regex: '(cinder_tempest_plugin|tempest.api.volume.test_volumes_actions)'
- job:
name: cinder-multibackend-matrix-migration
parent: devstack-tempest
description: |
Run migration tests between several combinations of backends
(LVM, Ceph, NFS)
Former names for this job were:
* legacy-tempest-dsvm-multibackend-matrix
timeout: 10800
required-projects:
- opendev.org/openstack/devstack-plugin-ceph
- opendev.org/openstack/devstack-plugin-nfs
run: playbooks/cinder-multibackend-matrix.yaml
host-vars:
controller:
devstack_plugins:
devstack-plugin-ceph: https://opendev.org/openstack/devstack-plugin-ceph
devstack-plugin-nfs: https://opendev.org/openstack/devstack-plugin-nfs
vars:
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvm,nfs:nfs,ceph:ceph
ENABLE_NFS_CINDER: true
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
volume:
build_timeout: 900
cinder-27.0.0+git20260115.159.4fef6d9d4/CONTRIBUTING.rst 0000664 0000000 0000000 00000001123 15131732575 0021026 0 ustar 00root root 0000000 0000000 The source repository for this project can be found at:
https://opendev.org/openstack/cinder
Pull requests submitted through GitHub are not monitored.
To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:
https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
Bugs should be filed on Launchpad:
https://bugs.launchpad.net/cinder
For more specific information about contributing to this repository, see the
cinder contributor guide:
https://docs.openstack.org/cinder/latest/contributor/contributing.html
cinder-27.0.0+git20260115.159.4fef6d9d4/HACKING.rst 0000664 0000000 0000000 00000003644 15131732575 0020175 0 ustar 00root root 0000000 0000000 Cinder Style Commandments
=========================
- Step 1: Read the OpenStack Style Commandments
https://docs.openstack.org/hacking/latest/
- Step 2: Read on
Cinder Specific Commandments
----------------------------
- [N322] Ensure default arguments are not mutable.
- [N323] Add check for explicit import of _() to ensure proper translation.
- [C301] timeutils.utcnow() from oslo_utils should be used instead of
datetime.now().
- [C303] Ensure that there are no 'print()' statements are used in code that
should be using LOG calls.
- [C309] Unit tests should not perform logging.
- [C310] Check for improper use of logging format arguments.
- [C311] Check for proper naming and usage in option registration.
- [C312] Validate that logs are not translated.
- [C313] Check that assertTrue(value) is used and not assertEqual(True, value).
- [C336] Must use a dict comprehension instead of a dict constructor with a
sequence of key-value pairs.
- [C337] Ensure the standard library mock modules is used and not the third
party mock library that was needed for Python 2 support.
- [C338] Log.warn is deprecated. Enforce use of LOG.warning.
General
-------
- Use 'raise' instead of 'raise e' to preserve original traceback or exception
being reraised::
except Exception as e:
...
raise e # BAD
except Exception:
...
raise # OKAY
Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.
For more information on creating unit tests and utilizing the testing
infrastructure in OpenStack Cinder, please see
https://docs.openstack.org/cinder/latest/contributor/testing.html
cinder-27.0.0+git20260115.159.4fef6d9d4/LICENSE 0000664 0000000 0000000 00000023637 15131732575 0017410 0 ustar 00root root 0000000 0000000
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
cinder-27.0.0+git20260115.159.4fef6d9d4/README.rst 0000664 0000000 0000000 00000001726 15131732575 0020065 0 ustar 00root root 0000000 0000000 ================
OpenStack Cinder
================
OpenStack Cinder is a storage service for an open cloud computing service.
You can learn more about Cinder at:
* `Wiki `__
* `Developer Docs `__
* `Blueprints `__
* `Release notes `__
* `Design specifications `__
Getting Started
---------------
If you'd like to run from the master branch, you can clone the git repo:
git clone https://opendev.org/openstack/cinder
If you'd like to contribute, please see the information in
`CONTRIBUTING.rst `_
You can raise bugs on `Launchpad `__
Python client
-------------
`Python Cinderclient `__
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/ 0000775 0000000 0000000 00000000000 15131732575 0017713 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/ 0000775 0000000 0000000 00000000000 15131732575 0021213 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/api_microversion_history.rst 0000664 0000000 0000000 00000000105 15131732575 0027072 0 ustar 00root root 0000000 0000000 .. include:: ../../cinder/api/openstack/rest_api_version_history.rst
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/conf.py 0000664 0000000 0000000 00000015120 15131732575 0022511 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Cinder documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# NOTE: these inserts make the cinder source tree importable while building
# the api-ref (e.g. for documents included from the cinder package).
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# 'os_api_ref' provides the rest_method/rest_parameters/rest_status_code
# directives used by the *.inc files; 'openstackdocstheme' supplies the
# HTML theme configured below.
extensions = [
'os_api_ref',
'openstackdocstheme',
]
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
copyright = u'OpenStack Foundation'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/cinder'
openstackdocs_bug_project = 'cinder'
openstackdocs_bug_tag = 'api-ref'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = '_theme'
html_theme = 'openstackdocs'
# Render the per-page table of contents in the sidebar.
html_theme_options = {
"sidebar_mode": "toc",
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'cinderdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Cinder.tex', u'OpenStack Block Storage API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/index.rst 0000664 0000000 0000000 00000000557 15131732575 0023063 0 ustar 00root root 0000000 0000000 =================
Block Storage API
=================
Contents:
API content can be searched using the :ref:`search`.
Details for each microversion change can be found in the
:doc:`REST API Version History ` documentation.
.. toctree::
:hidden:
api_microversion_history
.. toctree::
:maxdepth: 2
v3/index
v2/index
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/status.yaml 0000664 0000000 0000000 00000003034 15131732575 0023422 0 ustar 00root root 0000000 0000000 200:
default: |
Request was successful.
201:
default: |
Request has been fulfilled and new resource created.
202:
default: |
Request is accepted, but processing may take some time.
203:
default: |
Returned information is not full set, but a subset.
204:
default: |
Request fulfilled but service does not return anything.
300:
default: |
The resource corresponds to more than one representation.
400:
default: |
Some content in the request was invalid.
401:
default: |
User must authenticate before making a request.
403:
default: |
Policy does not allow current user to do this operation.
404:
default: |
The requested resource could not be found.
405:
default: |
Method is not valid for this endpoint and resource.
409:
default: |
This resource has an action in progress that would conflict with this request.
413:
default: |
This operation cannot be completed.
415:
default: |
The entity of the request is in a format not supported by the requested
resource for the method.
500:
default: |
Something went wrong with the service which prevents it from fulfilling
the request.
501:
default: |
The service does not have the functionality required to fulfill this
request.
503:
default: |
The service cannot handle the request right now.
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/ 0000775 0000000 0000000 00000000000 15131732575 0021542 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/api-versions.inc 0000664 0000000 0000000 00000000772 15131732575 0024662 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
API versions
============
List Api Versions
~~~~~~~~~~~~~~~~~
.. rest_method:: GET /
Lists information for all Block Storage API versions.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
- 300
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
- 405
- 500
- 503
Response
--------
**Example List Api Versions: JSON request**
.. literalinclude:: ./samples/versions-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/availability-zones-v2.inc 0000664 0000000 0000000 00000001535 15131732575 0026374 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Availability zones (os-availability-zone)
=========================================
List availability zone information.
Get Availability Zone Information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/os-availability-zone
List availability zone information.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameter
------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id
- availabilityZoneInfo: availability_zone_info
- zoneName: availability_zone_3
- zoneState: availability_zone_state
- available: available
Response Example
----------------
.. literalinclude:: ./samples/availability-zone-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/capabilities-v2.inc 0000664 0000000 0000000 00000002235 15131732575 0025215 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Capabilities for storage back ends (capabilities)
=================================================
Shows capabilities for a storage back end.
Show back-end capabilities
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/capabilities/{hostname}
Shows capabilities for a storage back end on the host.
The ``hostname`` takes the form of ``hostname@volume_backend_name``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- hostname: hostname
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- pool_name: pool_name
- description: description
- volume_backend_name: volume_backend_name
- namespace: namespace_1
- visibility: visibility
- driver_version: driver_version
- vendor_name: vendor_name
- properties: properties
- storage_protocol: storage_protocol
- replication_targets: replication_targets
- display_name: display_name
Response Example
----------------
.. literalinclude:: ./samples/backend-capabilities-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/consistencygroups-v2.inc 0000664 0000000 0000000 00000013040 15131732575 0026361 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Consistency groups
==================
Consistency groups enable you to create snapshots at the exact same
point in time from multiple volumes. For example, a database might
place its tables, logs, and configuration on separate volumes. To
restore this database from a previous point in time, it makes sense
to restore the logs, tables, and configuration together from the
exact same point in time.
Use the policy configuration file to grant permissions for these actions
to limit roles.
List consistency groups
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/consistencygroups
Lists consistency groups.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/consistency-groups-list-response.json
:language: javascript
Create consistency group
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/consistencygroups
Creates a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- description: description_6
- availability_zone: availability_zone
- volume_types: volume_types_2
- name: name_15
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-create-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- status: status_1
- description: description_11
- availability_zone: availability_zone
- created_at: created_at
- volume_types: volume_types
- name: name_15
- id: consistencygroup_id_1
Response Example
----------------
.. literalinclude:: ./samples/consistency-group-create-response.json
:language: javascript
Show consistency group details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/consistencygroups/{consistencygroup_id}
Shows details for a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- consistencygroup_id: consistencygroup_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_1
- description: description
- availability_zone: availability_zone
- created_at: created_at
- volume_types: volume_types
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/consistency-group-show-response.json
:language: javascript
Create consistency group from source
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/consistencygroups/create_from_src
Creates a consistency group from source.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- status: status_1
- user_id: user_id
- description: description
- cgsnapshot_id: cgsnapshot_id
- source_cgid: source_cgid
- project_id: project_id_path
- name: name
- project_id: project_id
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-create-from-src-request.json
:language: javascript
Delete consistency group
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
POST /v2/{project_id}/consistencygroups/{consistencygroup_id}/delete
Deletes a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- force: force
- project_id: project_id_path
- consistencygroup_id: consistencygroup_id
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-delete-request.json
:language: javascript
List consistency groups with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/consistencygroups/detail
Lists consistency groups with details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_1
- description: description
- availability_zone: availability_zone
- created_at: created_at
- volume_types: volume_types
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/consistency-groups-list-detailed-response.json
:language: javascript
Update consistency group
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
PUT /v2/{project_id}/consistencygroups/{consistencygroup_id}/update
Updates a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- remove_volumes: remove_volumes
- description: description
- add_volumes: add_volumes
- name: name
- project_id: project_id_path
- consistencygroup_id: consistencygroup_id
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-update-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/ext-backups-actions-v2.inc 0000664 0000000 0000000 00000003020 15131732575 0026441 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Backup actions (backups, action)
================================
Force-deletes a backup and resets the status for a backup.
Force-delete backup
~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/backups/{backup_id}/action
Force-deletes a backup. Specify the ``os-force_delete`` action in the request
body.
This operation deletes the backup and any backup data.
The backup driver returns the ``405`` status code if it does not
support this operation.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 404
- 405
Request
-------
.. rest_parameters:: parameters.yaml
- os-force_delete: os-force_delete
- project_id: project_id_path
- backup_id: backup_id
Request Example
---------------
.. literalinclude:: ./samples/backup-force-delete-request.json
:language: javascript
Reset backup's status
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/backups/{backup_id}/action
Reset a backup's status. Specify the ``os-reset_status`` action in the request
body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- status: status_7
- os-reset_status: os-reset_status
- project_id: project_id_path
- backup_id: backup_id
Request Example
---------------
.. literalinclude:: ./samples/backup-reset-status-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/ext-backups.inc 0000664 0000000 0000000 00000020437 15131732575 0024471 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Backups (backups)
=================
A backup is a full copy of a volume stored in an external service.
The service can be configured. The only supported service is Object
Storage. A backup can subsequently be restored from the external
service to either the same volume that the backup was originally
taken from or to a new volume.
When you create, list, or delete backups, these status values are
possible:
**Backup statuses**
+-----------------+---------------------------------------------+
| Status | Description |
+-----------------+---------------------------------------------+
| creating | The backup is being created. |
+-----------------+---------------------------------------------+
| available | The backup is ready to restore to a volume. |
+-----------------+---------------------------------------------+
| deleting | The backup is being deleted. |
+-----------------+---------------------------------------------+
| error | A backup error occurred. |
+-----------------+---------------------------------------------+
| restoring | The backup is being restored to a volume. |
+-----------------+---------------------------------------------+
| error_deleting | An error occurred while deleting the backup.|
+-----------------+---------------------------------------------+
If an error occurs, you can find more information about the error
in the ``fail_reason`` field for the backup.
List backups with details
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/backups/detail
Lists Block Storage backups, with details, to which the project has access.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_4
- object_count: object_count
- fail_reason: fail_reason
- description: description
- links: links_1
- availability_zone: availability_zone
- created_at: created_at
- updated_at: updated_at
- name: name_1
- has_dependent_backups: has_dependent_backups
- volume_id: volume_id
- container: container
- backups: backups
- size: size
- id: id_1
- is_incremental: is_incremental
- data_timestamp: data_timestamp
- snapshot_id: snapshot_id_2
Response Example
----------------
.. literalinclude:: ./samples/backups-list-detailed-response.json
:language: javascript
Show backup details
~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/backups/{backup_id}
Shows details for a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_4
- object_count: object_count
- container: container
- description: description
- links: links_1
- availability_zone: availability_zone
- created_at: created_at
- updated_at: updated_at
- name: name_1
- has_dependent_backups: has_dependent_backups
- volume_id: volume_id
- fail_reason: fail_reason
- size: size
- backup: backup
- id: id_1
- is_incremental: is_incremental
- data_timestamp: data_timestamp
- snapshot_id: snapshot_id_2
Response Example
----------------
.. literalinclude:: ./samples/backup-show-response.json
:language: javascript
Delete backup
~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/backups/{backup_id}
Deletes a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id
Restore backup
~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/backups/{backup_id}/restore
Restores a Block Storage backup to an existing or new Block Storage volume.
You must specify either the UUID or name of the volume. If you
specify both the UUID and name, the UUID takes priority.
If specifying ``volume_id`` the status of the volume must be ``available``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 413
Request
-------
.. rest_parameters:: parameters.yaml
- restore: restore
- name: name_1
- volume_id: volume_id
- project_id: project_id_path
- backup_id: backup_id
Request Example
---------------
.. literalinclude:: ./samples/backup-restore-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- restore: restore
- backup_id: backup_id
- volume_id: volume_id
- volume_name: volume_name
Response Example
----------------
.. literalinclude:: ./samples/backup-restore-response.json
:language: javascript
Create backup
~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/backups
Creates a Block Storage backup from a volume.
The status of the volume must be ``available`` or if the ``force`` flag is
used, backups of ``in-use`` volumes may also be created.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 500
Request
-------
.. rest_parameters:: parameters.yaml
- container: container
- description: description
- incremental: incremental
- volume_id: volume_id
- force: force
- backup: backup
- name: name_1
- project_id: project_id_path
- snapshot_id: snapshot_id_2
Request Example
---------------
.. literalinclude:: ./samples/backup-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup: backup
- id: id_1
- links: links_1
- name: name_1
List backups
~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/backups
Lists Block Storage backups to which the project has access.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backups: backups
- id: id_1
- links: links_1
- name: name_1
Response Example
----------------
.. literalinclude:: ./samples/backups-list-response.json
:language: javascript
Export backup
~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/backups/{backup_id}/export_record
Export information about a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup-record: backup_record
- backup_service: backup_service
- backup_url: backup_url
Response Example
----------------
.. literalinclude:: ./samples/backup-record-export-response.json
:language: javascript
Import backup
~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/backups/import_record
Import information about a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 201
.. rest_status_code:: error ../status.yaml
- 400
- 503
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup-record: backup_record
- backup_service: backup_service
- backup_url: backup_url
Request Example
---------------
.. literalinclude:: ./samples/backup-record-import-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- id: id_1
- links: links_1
- name: name_1
Response Example
----------------
.. literalinclude:: ./samples/backup-record-import-response.json
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/hosts.inc 0000664 0000000 0000000 00000003616 15131732575 0023403 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Hosts extension (os-hosts)
==========================
Administrators only, depending on policy settings.
Lists hosts and shows host details.
List all hosts
~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{admin_project_id}/os-hosts
Lists summary info for all hosts that are not disabled.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- service-status: host_service_status
- service: host_service
- zone: availability_zone_3
- service-state: service_state
- host_name: host_name_1
- last-update: updated_at
Response Example
----------------
.. literalinclude:: ./samples/hosts-list-response.json
:language: javascript
Show Host Details
~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{admin_project_id}/os-hosts/{host_name}
Shows volume and snapshot details for a cinder-volume host.
*Note:* This API is meant specifically for cinder-volume hosts only.
It is not valid against other Cinder service hosts or hosts where the
cinder-volume service has been disabled.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- host_name: hostname
Response
--------
.. rest_parameters:: parameters.yaml
- volume_count: total_count_str
- total_volume_gb: totalGigabytesUsedStr
- total_snapshot_gb: totalSnapshotsUsedStr
- project: project_id_2
- host: host_name_1
- snapshot_count: totalSnapshotsUsed
**Example Show Host Details**
.. literalinclude:: ./samples/hosts-get-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/index.rst 0000664 0000000 0000000 00000003170 15131732575 0023404 0 ustar 00root root 0000000 0000000 :tocdepth: 2
==============================
Block Storage API V2 (REMOVED)
==============================
.. note::
Version 2 of the Block Storage API was `deprecated in the Pike release
`_
and was removed during the Xena development cycle. `This document is
maintained for historical purposes only.`
`Version 3
`_
of the Block Storage API was `introduced in the Mitaka release
`_. Version
3.0, which is the default microversion at the ``/v3`` endpoint, was
designed to be identical to version 2. Thus, scripts using the Block
Storage API v2 should be adaptable to version 3 with minimal changes.
.. rest_expand_all::
.. include:: api-versions.inc
.. include:: availability-zones-v2.inc
.. include:: ext-backups.inc
.. include:: ext-backups-actions-v2.inc
.. include:: capabilities-v2.inc
.. include:: os-cgsnapshots-v2.inc
.. include:: consistencygroups-v2.inc
.. include:: hosts.inc
.. include:: limits.inc
.. include:: os-vol-pool-v2.inc
.. include:: os-vol-transfer-v2.inc
.. include:: qos-specs-v2-qos-specs.inc
.. include:: quota-classes.inc
.. include:: quota-sets.inc
.. include:: volume-manage.inc
.. include:: volume-type-access.inc
.. include:: volumes-v2-extensions.inc
.. include:: volumes-v2-snapshots.inc
.. include:: volumes-v2-snapshots-actions.inc
.. include:: volumes-v2-types.inc
.. include:: volumes-v2-versions.inc
.. include:: volumes-v2-volumes-actions.inc
.. include:: volumes-v2-volumes.inc
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/limits.inc 0000664 0000000 0000000 00000002476 15131732575 0023547 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Limits (limits)
===============
Shows absolute limits for a project.
An absolute limit value of ``-1`` indicates that the absolute limit
for the item is infinite.
Show absolute limits
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/limits
Shows absolute limits for a project.
An absolute limit value of ``-1`` indicates that the absolute limit
for the item is infinite.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- totalSnapshotsUsed: totalSnapshotsUsed
- maxTotalBackups: maxTotalBackups
- maxTotalVolumeGigabytes: maxTotalVolumeGigabytes
- limits: limits
- maxTotalSnapshots: maxTotalSnapshots
- maxTotalBackupGigabytes: maxTotalBackupGigabytes
- totalBackupGigabytesUsed: totalBackupGigabytesUsed
- maxTotalVolumes: maxTotalVolumes
- totalVolumesUsed: totalVolumesUsed
- rate: rate
- totalBackupsUsed: totalBackupsUsed
- totalGigabytesUsed: totalGigabytesUsed
- absolute: absolute
Response Example
----------------
.. literalinclude:: ./samples/limits-show-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/os-cgsnapshots-v2.inc 0000664 0000000 0000000 00000006530 15131732575 0025541 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Consistency group snapshots
===========================
Lists all, lists all with details, shows details for, creates, and
deletes consistency group snapshots.
Delete consistency group snapshot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/cgsnapshots/{cgsnapshot_id}
Deletes a consistency group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- cgsnapshot_id: cgsnapshot_id_1
Show consistency group snapshot details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/cgsnapshots/{cgsnapshot_id}
Shows details for a consistency group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- cgsnapshot_id: cgsnapshot_id_1
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status
- description: description
- created_at: created_at
- consistencygroup_id: consistencygroup_id
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/cgsnapshots-show-response.json
:language: javascript
List consistency group snapshots with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/cgsnapshots/detail
Lists all consistency group snapshots with details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status
- description: description
- created_at: created_at
- consistencygroup_id: consistencygroup_id
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/cgsnapshots-list-detailed-response.json
:language: javascript
List consistency group snapshots
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/cgsnapshots
Lists all consistency group snapshots.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/cgsnapshots-list-response.json
:language: javascript
Create consistency group snapshot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/cgsnapshots
Creates a consistency group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- name: name
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/cgsnapshots-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status
- description: description
- created_at: created_at
- consistencygroup_id: consistencygroup_id
- id: id
- name: name
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/os-vol-pool-v2.inc 0000664 0000000 0000000 00000002050 15131732575 0024745 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Back-end storage pools
======================
Administrator only. Lists all back-end storage pools that are known
to the scheduler service.
List back-end storage pools
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/scheduler-stats/get_pools
Lists all back-end storage pools.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- detail: detail
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- updated: updated
- QoS_support: QoS_support
- name: name_16
- total_capacity_gb: total_capacity
- volume_backend_name: volume_backend_name
- capabilities: capabilities
- free_capacity_gb: free_capacity
- driver_version: driver_version
- reserved_percentage: reserved_percentage
- storage_protocol: storage_protocol
Response Example
----------------
.. literalinclude:: ./samples/pools-list-detailed-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/os-vol-transfer-v2.inc 0000664 0000000 0000000 00000010353 15131732575 0025625 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume transfer
===============
Transfers a volume from one user to another user.
Accept volume transfer
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/os-volume-transfer/{transfer_id}/accept
Accepts a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- auth_key: auth_key
- transfer: transfer
- transfer_id: transfer_id
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-transfer-accept-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id
- id: id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-transfer-accept-response.json
:language: javascript
Create volume transfer
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/os-volume-transfer
Creates a volume transfer.
**Preconditions**
* The volume ``status`` must be ``available``
* Transferring encrypted volumes is not supported
* If the volume has snapshots, those snapshots must be ``available``
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer: transfer
- name: name
- volume_id: volume_id
Request Example
---------------
.. literalinclude:: ./samples/volume-transfer-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- auth_key: auth_key
- links: links
- created_at: created_at
- volume_id: volume_id
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-transfer-create-response.json
:language: javascript
List volume transfers
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/os-volume-transfer
Lists volume transfers.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id
- id: id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-transfers-list-response.json
:language: javascript
Show volume transfer details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/os-volume-transfer/{transfer_id}
Shows details for a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- transfer_id: transfer_id
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- created_at: created_at
- volume_id: volume_id
- id: id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-transfer-show-response.json
:language: javascript
Delete volume transfer
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/os-volume-transfer/{transfer_id}
Deletes a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- transfer_id: transfer_id
- project_id: project_id_path
List volume transfers, with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/os-volume-transfer/detail
Lists volume transfers, with details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- created_at: created_at
- volume_id: volume_id
- id: id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-transfers-list-detailed-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/parameters.yaml 0000664 0000000 0000000 00000125313 15131732575 0024576 0 ustar 00root root 0000000 0000000 # variables in header
x-openstack-request-id:
description: >
The ID of the API request. Returned in the response header and can be
used to track the request.
in: header
required: false
type: string
# variables in path
admin_project_id:
description: |
The UUID of the administrative project.
in: path
required: true
type: string
backup_id:
description: |
The UUID for a backup.
in: path
required: true
type: string
cascade:
description: |
Remove any snapshots along with the volume. Default=False.
in: path
required: false
type: boolean
cgsnapshot_id_1:
description: |
The ID of the consistency group snapshot.
in: path
required: true
type: string
encryption_id:
description: |
The ID of the encryption type.
in: path
required: true
type: string
hostname:
description: |
The name of the host that hosts the storage back
end.
in: path
required: true
type: string
key_1:
description: |
The metadata key name for the metadata that you
want to remove.
in: path
required: true
type: string
key_2:
description: |
The metadata key name for the metadata that you
want to see.
in: path
required: true
type: string
key_3:
description: |
The metadata key name for the metadata that you
want to update.
in: path
required: true
type: string
project_id_path:
description: |
The UUID of the project in a multi-tenancy cloud.
in: path
required: true
type: string
qos_id:
description: |
The ID of the QoS specification.
in: path
required: true
type: string
quota_class_name:
description:
The name of the quota class for which to set quotas.
in: path
required: true
type: string
quotas_project_id:
description: |
The UUID of the project in a multi-tenancy cloud.
in: path
required: true
type: string
transfer_id:
description: |
The unique identifier for a volume transfer.
in: path
required: true
type: string
vol_type_id:
description: |
The UUID for an existing volume type.
in: path
required: true
type: string
volume_id_path:
description: |
The UUID of the volume.
in: path
required: true
type: string
volume_type:
description: |
The ID of Volume Type to be accessed by project.
in: path
required: false
type: string
volume_type_access:
description: |
The ID of Volume Type to be accessed by project.
in: path
required: true
type: string
volume_type_id:
description: |
The UUID for an existing volume type.
in: path
required: false
type: string
# variables in query
action:
description: |
The action. Valid values are "set" or "unset."
in: query
required: true
type: string
all-tenants:
description: |
Shows details for all projects. Admin only.
in: query
required: false
type: string
bootable_query:
description: |
Filters results by bootable status. Default=None.
in: query
required: false
type: boolean
detail:
description: |
Indicates whether to show pool details or only
pool names in the response. Set to ``true`` to show pool details.
Set to ``false`` to show only pool names. Default is ``false``.
in: query
required: false
type: boolean
image-id:
description: |
Creates volume from image ID. Default=None.
in: query
required: false
type: string
limit:
description: |
Requests a page size of items. Returns a number
of items up to a limit value. Use the ``limit`` parameter to make
an initial limited request and use the ID of the last-seen item
from the response as the ``marker`` parameter value in a
subsequent limited request.
in: query
required: false
type: integer
marker:
description: |
The ID of the last-seen item. Use the ``limit``
parameter to make an initial limited request and use the ID of the
last-seen item from the response as the ``marker`` parameter value
in a subsequent limited request.
in: query
required: false
type: string
name_volume:
description: |
Filters results by a name. Default=None.
in: query
required: false
type: string
offset:
description: |
Used in conjunction with ``limit`` to return a slice of items. ``offset``
is where to start in the list.
in: query
required: false
type: integer
sort:
description: |
Comma-separated list of sort keys and optional
sort directions in the form of < key > [: < direction > ]. A valid
direction is ``asc`` (ascending) or ``desc`` (descending).
in: query
required: false
type: string
sort_dir:
description: |
Sorts by one or more sets of attribute and sort
direction combinations. If you omit the sort direction in a set,
default is ``desc``.
in: query
required: false
type: string
sort_key:
description: |
Sorts by an attribute. A valid value is ``name``,
``status``, ``container_format``, ``disk_format``, ``size``,
``id``, ``created_at``, or ``updated_at``. Default is
``created_at``. The API uses the natural sorting direction of the
``sort_key`` attribute value.
in: query
required: false
type: string
usage:
description: |
Set to ``usage=true`` to show quota usage.
Default is ``false``.
in: query
required: false
type: boolean
# variables in body
absolute:
description: |
An ``absolute`` limits object.
in: body
required: true
type: object
add_volumes:
description: |
One or more volume UUIDs, separated by commas, to
add to the volume consistency group.
in: body
required: false
type: string
alias:
description: |
The alias for the extension. For example,
"FOXNSOX", "os-availability-zone", "os-extended-quotas",
"os-share-unmanage" or "os-used-limits."
in: body
required: true
type: string
attach_status:
description: |
The volume attach status.
in: body
required: false
type: string
attachment_id:
description: |
The interface ID.
in: body
required: false
type: string
attachments:
description: |
Instance attachment information. If this volume
is attached to a server instance, the attachments list includes
the UUID of the attached server, an attachment UUID, the name of
the attached host, if any, the volume UUID, the device, and the
device UUID. Otherwise, this list is empty.
in: body
required: true
type: array
auth_key:
description: |
The authentication key for the volume transfer.
in: body
required: true
type: string
availability_zone:
description: |
The name of the availability zone.
in: body
required: false
type: string
availability_zone_3:
description: |
The availability zone name.
in: body
required: true
type: string
availability_zone_info:
description: |
The list of availability zone information.
in: body
required: true
type: array
availability_zone_state:
description: |
The current state of the availability zone.
in: body
required: true
type: object
available:
description: |
Whether the availability zone is available for use.
in: body
required: true
type: boolean
backup:
description: |
A ``backup`` object.
in: body
required: true
type: object
backup_gigabytes:
description: |
The size (GB) of backups that are allowed for each project.
in: body
required: true
type: integer
backup_record:
description: |
An object recording volume backup metadata, including ``backup_service``
and ``backup_url``.
in: body
required: true
type: object
backup_service:
description: |
The service used to perform the backup.
in: body
required: true
type: string
backup_url:
description: |
An identifier string to locate the backup.
in: body
required: true
type: string
backups:
description: |
A list of ``backup`` objects.
in: body
required: true
type: array
backups_number:
description: |
The number of backups that are allowed for each project.
in: body
required: true
type: integer
bootable:
description: |
Enables or disables the bootable attribute. You
can boot an instance from a bootable volume.
in: body
required: true
type: boolean
bootable_response:
description: |
Enables or disables the bootable attribute. You
can boot an instance from a bootable volume.
in: body
required: true
type: string
capabilities:
description: |
The capabilities for the back end. The value is
either ``null`` or a string value that indicates the capabilities
for each pool. For example, ``total_capacity_gb`` or ``QoS_support``.
in: body
required: true
type: object
cgsnapshot_id:
description: |
The UUID of the consistency group snapshot.
in: body
required: false
type: string
cipher:
description: |
The encryption algorithm or mode. For example, aes-xts-plain64. The default
value is None.
in: body
required: false
type: string
connector:
description: |
The ``connector`` object.
in: body
required: false
type: object
consistencygroup_id:
description: |
The UUID of the consistency group.
in: body
required: true
type: string
consistencygroup_id_1:
description: |
The UUID of the consistency group.
in: body
required: false
type: string
consumer:
description: |
The consumer type.
in: body
required: false
type: string
consumer_1:
description: |
The consumer type.
in: body
required: true
type: string
container:
description: |
The container name or null.
in: body
required: false
type: string
control_location:
description: |
Notional service where encryption is performed. Valid values are
"front-end" or "back-end". The default value is "front-end".
in: body
required: false
type: string
cores:
description: |
The number of instance cores that are allowed for
each project.
in: body
required: true
type: integer
created_at:
description: |
The date and time when the resource was created.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
For example, ``2015-08-27T09:49:58-05:00``.
The ``±hh:mm`` value, if included, is the time zone as an offset
from UTC.
in: body
required: true
type: string
created_at_1:
description: |
Date and time when the volume was created.
in: body
required: true
type: string
data_timestamp:
description: |
The time when the data on the volume was first saved. If it is
a backup from volume, it will be the same as ``created_at``
for a backup. If it is a backup from a snapshot, it will be the
same as ``created_at`` for the snapshot.
in: body
required: true
type: string
deleted:
description: |
The resource is deleted or not.
in: body
required: true
type: boolean
deleted_at:
description: |
The date and time when the resource was deleted.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
For example, ``2015-08-27T09:49:58-05:00``.
The ``±hh:mm`` value, if included, is the time zone as an offset
from UTC. In the previous example, the offset value is ``-05:00``.
If the ``deleted_at`` date and time stamp is not set, its value is
``null``.
in: body
required: true
type: string
description:
description: |
The backup description or null.
in: body
required: false
type: string
description_1:
description: |
The consistency group snapshot description.
in: body
required: true
type: string
description_10:
description: |
The capabilities description.
in: body
required: true
type: string
description_11:
description: |
The consistency group description.
in: body
required: false
type: string
description_2:
description: |
The description of the consistency group.
in: body
required: false
type: string
description_3:
description: |
The description of the consistency group.
in: body
required: true
type: string
description_4:
description: |
A description for the snapshot. Default is
``None``.
in: body
required: false
type: string
description_5:
description: |
The volume description.
in: body
required: false
type: string
description_6:
description: |
The consistency group description.
in: body
required: true
type: string
description_7:
description: |
The extension description.
in: body
required: true
type: string
description_8:
description: |
A description for the snapshot.
in: body
required: true
type: string
description_9:
description: |
The volume description.
in: body
required: true
type: string
display_name:
description: |
The name of volume backend capabilities.
in: body
required: true
type: string
driver_version:
description: |
The driver version.
in: body
required: true
type: string
encrypted:
description: |
If true, this volume is encrypted.
in: body
required: true
type: boolean
encryption:
description: |
The encryption information.
in: body
required: true
type: object
encryption_id_body:
description: |
The UUID of the encryption.
in: body
required: true
type: string
extra_specs:
description: |
A set of key and value pairs that contains the
specifications for a volume type.
in: body
required: true
type: object
fail_reason:
description: |
If the backup failed, the reason for the failure.
Otherwise, null.
in: body
required: true
type: string
fixed_ips:
description: |
The number of fixed IP addresses that are allowed
for each project. Must be equal to or greater than the number of
allowed instances.
in: body
required: true
type: integer
floating_ips:
description: |
The number of floating IP addresses that are
allowed for each project.
in: body
required: true
type: integer
force:
description: |
Indicates whether to backup, even if the volume
is attached. Default is ``false``.
in: body
required: false
type: boolean
force_1:
description: |
Indicates whether to snapshot, even if the volume
is attached. Default is ``false``.
in: body
required: false
type: boolean
force_2:
description: |
If set to ``true``, forces deletion of a
consistency group that has a registered volume.
in: body
required: false
type: boolean
free_capacity:
description: |
The amount of free capacity for the back-end
volume, in GBs. A valid value is a string, such as ``unknown``, or
a number (integer or floating point).
in: body
required: true
type: string
gigabytes:
description: |
The size (GB) of volumes and snapshots that are allowed for each project.
in: body
required: true
type: integer
gigabytes_for_type:
description: |
The size (GB) of volumes and snapshots that are allowed for each project
and the specified volume type.
in: body
required: true
type: integer
groups_number:
description: |
The number of groups that are allowed for each project.
in: body
required: true
type: integer
has_dependent_backups:
description: |
If this value is ``true``, there are other backups depending on
this backup.
in: body
required: false
type: boolean
host:
description: |
The OpenStack Block Storage host where the
existing volume resides.
in: body
required: true
type: string
host_name:
description: |
The name of the attaching host.
in: body
required: false
type: string
host_name_1:
description: |
The name of the host that hosts the storage backend,
may take the format of ``host@backend``.
in: body
required: true
type: string
host_service:
description: |
The name of the service which is running on the host.
in: body
required: true
type: string
host_service_status:
description: |
The status of the service. One of ``available`` or ``unavailable``.
in: body
required: true
type: string
id:
description: |
The UUID of the volume transfer.
in: body
required: true
type: string
id_1:
description: |
The UUID of the backup.
in: body
required: true
type: string
id_2:
description: |
The UUID of the consistency group snapshot.
in: body
required: true
type: string
id_3:
description: |
The generated ID for the QoS specification.
in: body
required: true
type: string
id_4:
description: |
The snapshot UUID.
in: body
required: true
type: string
id_5:
description: |
The UUID of the volume.
in: body
required: true
type: string
id_6:
description: |
The UUID of the consistency group.
in: body
required: true
type: string
id_7:
description: |
The ID for the quota set.
in: body
required: true
type: integer
imageRef:
description: |
The UUID of the image from which you want to
create the volume. Required to create a bootable volume.
in: body
required: false
type: string
in_use:
description: |
The in use data size. Visible only if you set the
``usage=true`` query parameter.
in: body
required: false
type: string
incremental:
description: |
The backup mode. A valid value is ``true`` for
incremental backup mode or ``false`` for full backup mode. Default
is ``false``.
in: body
required: false
type: boolean
injected_file_content_bytes:
description: |
The number of bytes of content that are allowed
for each injected file.
in: body
required: true
type: integer
injected_file_path_bytes:
description: |
The number of bytes that are allowed for each
injected file path.
in: body
required: true
type: integer
injected_files:
description: |
The number of injected files that are allowed for
each project.
in: body
required: true
type: integer
instance_uuid:
description: |
The UUID of the attaching instance.
in: body
required: false
type: string
instances:
description: |
The number of instances that are allowed for each
project.
in: body
required: true
type: integer
is_incremental:
description: |
Indicates whether the backup mode is incremental.
If this value is ``true``, the backup mode is incremental. If this
value is ``false``, the backup mode is full.
in: body
required: false
type: boolean
is_public:
description:
Volume type which is accessible to the public.
in: body
required: false
type: boolean
key:
description: |
The metadata key name for the metadata that you
want to remove.
in: body
required: true
type: string
key_pairs:
description: |
The number of key pairs that are allowed for each
user.
in: body
required: true
type: integer
key_size:
description: |
Size of encryption key, in bits. For example, 128 or 256. The default value
is None.
in: body
required: false
type: integer
keys:
description: |
List of Keys.
in: body
required: true
type: array
limits:
description: |
A list of ``limit`` objects.
in: body
required: true
type: object
links:
description: |
Links for the volume transfer.
in: body
required: true
type: array
links_1:
description: |
Links for the backup.
in: body
required: true
type: array
links_2:
description: |
The QoS specification links.
in: body
required: true
type: array
links_3:
description: |
The volume links.
in: body
required: true
type: array
links_4:
description: |
List of links related to the extension.
in: body
required: true
type: array
links_5:
description: |
List of links related to the extension.
in: body
required: true
type: array
links_vol_optional:
description: |
The volume links.
in: body
required: false
type: array
location:
description: |
Full URL to a service or server.
format: uri
in: body
required: true
type: string
maxTotalBackupGigabytes:
description: |
The maximum total amount of backups, in gibibytes
(GiB).
in: body
required: true
type: integer
maxTotalBackups:
description: |
The maximum number of backups.
in: body
required: true
type: integer
maxTotalGroups:
description: |
The maximum number of groups.
in: body
required: true
type: integer
maxTotalSnapshots:
description: |
The maximum number of snapshots.
in: body
required: true
type: integer
maxTotalSnapshotsOptional:
description: |
The maximum number of snapshots.
in: body
required: false
type: integer
maxTotalVolumeGigabytes:
description: |
The maximum total amount of volumes, in gibibytes (GiB).
in: body
required: true
type: integer
maxTotalVolumeGigabytesOptional:
description: |
The maximum total amount of volumes, in gibibytes (GiB).
in: body
required: false
type: integer
maxTotalVolumes:
description: |
The maximum number of volumes.
in: body
required: true
type: integer
maxTotalVolumesOptional:
description: |
The maximum number of volumes.
in: body
required: false
type: integer
meta:
description: |
The metadata key and value pair for the volume.
in: body
required: true
type: object
metadata:
description: |
One or more metadata key and value pairs for the
snapshot, if any.
in: body
required: true
type: object
metadata_1:
description: |
A ``metadata`` object. Contains one or more
metadata key and value pairs that are associated with the volume.
in: body
required: true
type: object
metadata_2:
description: |
One or more metadata key and value pairs that are
associated with the volume.
in: body
required: false
type: object
metadata_3:
description: |
One or more metadata key and value pairs that are
associated with the volume.
in: body
required: true
type: object
metadata_4:
description: |
One or more metadata key and value pairs to
associate with the volume.
in: body
required: false
type: string
metadata_5:
description: |
The image metadata to add to the volume as a set
of metadata key and value pairs.
in: body
required: true
type: object
metadata_6:
description: |
One or more metadata key and value pairs to
associate with the volume.
in: body
required: false
type: object
metadata_7:
description: |
One or more metadata key and value pairs for the
snapshot.
in: body
required: false
type: object
metadata_8:
description: |
The image metadata key value pairs.
in: body
required: true
type: object
metadata_items:
description: |
The number of metadata items that are allowed for
each instance.
in: body
required: true
type: integer
migrate_force_host_copy:
description: |
If false (the default), rely on the volume backend driver to perform
the migration, which might be optimized. If true, or the volume driver
fails to migrate the volume itself, a generic host-based migration is
performed.
in: body
required: false
type: boolean
migrate_host:
description: |
The target host for the volume migration. Host format is ``host@backend``.
in: body
required: true
type: string
migrate_lock_volume:
description: |
If true, migrating an ``available`` volume will change its status to
``maintenance`` preventing other operations from being performed on the
volume such as attach, detach, retype, etc.
in: body
required: false
type: boolean
migration_completion_error:
description: |
Used to indicate if an error has occurred elsewhere that requires clean up.
in: body
required: false
type: boolean
# NOTE(mriedem): We can update the migration_policy retype note about encrypted
# in-use volumes not being supported once
# https://bugzilla.redhat.com/show_bug.cgi?id=760547 is fixed.
migration_policy:
description: |
Specify if the volume should be migrated when it is re-typed.
Possible values are ``on-demand`` or ``never``. If not specified, the
default is ``never``.
.. note:: If the volume is attached to a server instance and will be
migrated, then by default policy only users with the administrative role
should attempt the retype operation. A retype which involves a migration
to a new host for an *in-use* encrypted volume is not supported.
in: body
required: false
type: string
migration_status:
description: |
The volume migration status. Admin only.
in: body
required: false
type: string
mountpoint:
description: |
The attaching mount point.
in: body
required: true
type: string
multiattach_resp:
description: |
If true, this volume can attach to more than one
instance.
in: body
required: true
type: boolean
name:
description: |
The name of the Volume Transfer.
in: body
required: true
type: string
name_1:
description: |
The backup name.
in: body
required: true
type: string
name_10:
description: |
The name of the extension. For example, "Fox In
Socks."
in: body
required: true
type: string
name_11:
description: |
The name of the back-end volume.
in: body
required: true
type: string
name_12:
description: |
The name of the snapshot.
in: body
required: true
type: string
name_13:
description: |
The volume name.
in: body
required: true
type: string
name_14:
description: |
The name of the volume to which you want to
restore a backup.
in: body
required: false
type: string
name_15:
description: |
The consistency group name.
in: body
required: false
type: string
name_16:
description: |
The name of the backend pool.
in: body
required: true
type: string
name_2:
description: |
The consistency group snapshot name.
in: body
required: true
type: string
name_3:
description: |
The name of the consistency group.
in: body
required: true
type: string
name_4:
description: |
The name of the QoS specification.
in: body
required: true
type: string
name_5:
description: |
The name of the snapshot. Default is ``None``.
in: body
required: false
type: string
name_6:
description: |
The volume transfer name.
in: body
required: false
type: string
name_7:
description: |
The name of the volume type.
in: body
required: true
type: string
name_9:
description: |
The consistency group name.
in: body
required: true
type: string
namespace:
description: |
Link associated to the extension.
in: body
required: true
type: string
namespace_1:
description: |
The storage namespace, such as
``OS::Storage::Capabilities::foo``.
in: body
required: true
type: string
new_size:
description: |
The new size of the volume, in gibibytes (GiB).
in: body
required: true
type: integer
new_type:
description: |
The new volume type that volume is changed with.
in: body
required: true
type: string
new_volume:
description: |
The UUID of the new volume.
in: body
required: true
type: string
object_count:
description: |
The number of objects in the backup.
in: body
required: true
type: integer
os-attach:
description: |
The ``os-attach`` action.
in: body
required: true
type: object
os-detach:
description: |
The ``os-detach`` action.
in: body
required: true
type: object
os-ext-snap-attr:progress:
description: |
A percentage value for the build progress.
in: body
required: true
type: string
os-ext-snap-attr:project_id:
description: |
The UUID of the owning project.
in: body
required: true
type: string
os-extend:
description: |
The ``os-extend`` action.
in: body
required: true
type: object
os-force_delete:
description: |
The ``os-force_delete`` action.
in: body
required: true
type: string
os-force_detach:
description: |
The ``os-force_detach`` action.
in: body
required: true
type: object
os-migrate_volume:
description: |
The ``os-migrate_volume`` action.
in: body
required: true
type: object
os-migrate_volume_completion:
description: |
The ``os-migrate_volume_completion`` action.
in: body
required: true
type: object
os-reset_status:
description: |
The ``os-reset_status`` action.
in: body
required: true
type: object
os-retype:
description: |
The ``os-retype`` action.
in: body
required: true
type: object
OS-SCH-HNT:scheduler_hints:
description: |
The dictionary of data to send to the scheduler.
in: body
required: false
type: object
os-set_bootable:
description: |
The ``os-set_bootable`` action.
in: body
required: true
type: object
os-set_image_metadata:
description: |
The ``os-set_image_metadata`` action.
in: body
required: true
type: object
os-show_image_metadata:
description: |
The ``os-show_image_metadata`` action.
in: body
required: true
type: object
os-unmanage:
description: |
The ``os-unmanage`` action. This action removes
the specified volume from Cinder management.
in: body
required: true
type: object
os-unset_image_metadata:
description: |
The ``os-unset_image_metadata`` action. This
action removes the key-value pairs from the image metadata.
in: body
required: true
type: object
os-vol-host-attr:host:
description: |
Current back-end of the volume.
Host format is ``host@backend#pool``.
in: body
required: false
type: string
os-vol-mig-status-attr:migstat:
description: |
The status of this volume migration (None means
that a migration is not currently in progress).
in: body
required: false
type: string
os-vol-mig-status-attr:name_id:
description: |
The volume ID that this volume name on the back-
end is based on.
in: body
required: false
type: string
os-vol-tenant-attr:tenant_id:
description: |
The project ID which the volume belongs to.
in: body
required: true
type: string
per_volume_gigabytes:
description: |
The size (GB) of volumes in request that are allowed for each
volume.
in: body
required: true
type: integer
perVolumeGigabytes:
description: |
The maximum amount of storage per volume, in gibibytes (GiB).
in: body
required: true
type: integer
pool_name:
description: |
The name of the storage pool.
in: body
required: true
type: string
project:
description: |
The ID of the project. Volume Type access to be
added to this project ID.
in: body
required: true
type: string
project_id:
description: |
The UUID of the project.
in: body
required: true
type: string
project_id_1:
description: |
The Project ID having access to this volume type.
in: body
required: true
type: string
project_id_2:
description: |
The UUID of the project which the host resource belongs to.
In the summary resource, the value is ``(total)``.
in: body
required: true
type: string
properties:
description: |
The backend volume capabilities list, which consists of
cinder standard capabilities and vendor unique
properties.
in: body
required: true
type: object
provider:
description: |
The class that provides encryption support.
in: body
required: true
type: string
provider_optional:
description: |
The class that provides encryption support.
in: body
required: false
type: string
qos_specs:
description: |
A ``qos_specs`` object.
in: body
required: true
type: object
QoS_support:
description: |
The quality of service (QoS) support.
in: body
required: true
type: boolean
quota_class_id:
description: |
The name of the quota class set.
in: body
required: true
type: string
quota_set:
description: |
A ``quota_set`` object.
in: body
required: true
type: object
ram:
description: |
The amount of instance RAM in megabytes that are
allowed for each project.
in: body
required: true
type: integer
rate:
description: |
Rate-limit volume copy bandwidth, used to
mitigate slow down of data access from the instances.
in: body
required: true
type: array
ref:
description: |
A reference to the existing volume. The internal
structure of this reference depends on the volume driver
implementation. For details about the required elements in the
structure, see the documentation for the volume driver.
in: body
required: true
type: string
ref_1:
description: |
A reference to the existing volume. The internal
structure of this reference is dependent on the implementation of
the volume driver, see the specific driver's documentation for
details of the required elements in the structure.
in: body
required: true
type: object
remove_volumes:
description: |
One or more volume UUIDs, separated by commas, to
remove from the volume consistency group.
in: body
required: false
type: string
replication_status:
description: |
The volume replication status.
in: body
required: true
type: string
replication_targets:
description: |
A list of volume backends used to replicate volumes
on this backend.
in: body
required: true
type: array
reserved:
description: |
Reserved volume size. Visible only if you set the
``usage=true`` query parameter.
in: body
required: false
type: integer
reserved_percentage:
description: |
The percentage of the total capacity that is
reserved for the internal use by the back end.
in: body
required: true
type: integer
restore:
description: |
A ``restore`` object.
in: body
required: true
type: object
security_group_rules:
description: |
The number of rules that are allowed for each
security group.
in: body
required: false
type: integer
security_groups:
description: |
The number of security groups that are allowed
for each project.
in: body
required: true
type: integer
service_state:
description: |
The state of the service. One of ``enabled`` or ``disabled``.
in: body
required: true
type: string
service_status:
description: |
The status of the service. One of ``enabled`` or ``disabled``.
in: body
required: true
type: string
size:
description: |
The size of the volume, in gibibytes (GiB).
in: body
required: true
type: integer
snapshot:
description: |
A partial representation of a snapshot used in
the creation process.
in: body
required: true
type: string
snapshot_id:
description: |
To create a volume from an existing snapshot,
specify the UUID of the volume snapshot. The volume is created in
same availability zone and with same size as the snapshot.
in: body
required: false
type: string
snapshot_id_2:
description: |
The UUID of the source volume snapshot.
in: body
required: false
type: string
snapshots_number:
description: |
The number of snapshots that are allowed for each project.
in: body
required: true
type: integer
snapshots_number_for_type:
description: |
The number of snapshots that are allowed for each project and
the specified volume type.
in: body
required: true
type: integer
source_cgid:
description: |
The UUID of the source consistency group.
in: body
required: false
type: string
source_volid:
description: |
The UUID of the source volume. The API creates a new volume with the same
size as the source volume unless a larger size is requested.
in: body
required: false
type: string
specs:
description: |
A ``specs`` object.
in: body
required: true
type: object
status:
description: |
The ``status`` of the consistency group snapshot.
in: body
required: false
type: string
status_1:
description: |
The status of the consistency group.
in: body
required: true
type: string
status_2:
description: |
The status for the snapshot.
in: body
required: true
type: string
status_3:
description: |
The volume status.
in: body
required: true
type: string
status_4:
description: |
The backup status. Refer to Backup statuses table
for the possible status value.
in: body
required: true
type: string
status_7:
description: |
The status for the backup.
in: body
required: true
type: string
storage_protocol:
description: |
The storage back end for the back-end volume. For
example, ``iSCSI`` or ``FC``.
in: body
required: true
type: string
total_capacity:
description: |
The total capacity for the back-end volume, in
GBs. A valid value is a string, such as ``unknown``, or a
number (integer or floating point).
in: body
required: true
type: string
total_count:
description: |
Total number of volumes.
in: body
required: true
type: integer
total_count_str:
description: |
Total number of volumes.
in: body
required: true
type: string
totalBackupGigabytesUsed:
description: |
The total number of backup gibibytes (GiB) used.
in: body
required: true
type: integer
totalBackupsUsed:
description: |
The total number of backups used.
in: body
required: true
type: integer
totalGigabytesUsed:
description: |
The total number of gibibytes (GiB) used.
in: body
required: true
type: integer
totalGigabytesUsedStr:
description: |
The total number of gibibytes (GiB) used.
in: body
required: true
type: string
totalSnapshotsUsed:
description: |
The total number of snapshots used.
in: body
required: true
type: integer
totalSnapshotsUsedStr:
description: |
The total number of snapshots used.
in: body
required: true
type: string
totalVolumesUsed:
description: |
The total number of volumes used.
in: body
required: true
type: integer
transfer:
description: |
The volume transfer object.
in: body
required: true
type: object
updated:
description: |
The date and time stamp when the extension was
last updated.
in: body
required: true
type: string
updated_1:
description: |
The date and time stamp when the API request was
issued.
in: body
required: true
type: string
updated_at:
description: |
The date and time when the resource was updated.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
For example, ``2015-08-27T09:49:58-05:00``.
The ``±hh:mm`` value, if included, is the time zone as an offset
from UTC. In the previous example, the offset value is ``-05:00``.
If the ``updated_at`` date and time stamp is not set, its value is
``null``.
in: body
required: true
type: string
user_id:
description: |
The UUID of the user.
in: body
required: true
type: string
vendor_name:
description: |
The name of the vendor.
in: body
required: true
type: string
visibility:
description: |
The volume type access.
in: body
required: true
type: string
volume:
description: |
A ``volume`` object.
in: body
required: true
type: object
volume_1:
description: |
A ``volume`` object.
in: body
required: true
type: string
volume_backend_name:
description: |
The name of the back-end volume.
in: body
required: true
type: string
volume_id:
description: |
The UUID of the volume.
in: body
required: true
type: string
volume_id_2:
description: |
The UUID of the volume that you want to back up.
in: body
required: true
type: string
volume_id_3:
description: |
To create a snapshot from an existing volume,
specify the UUID of the existing volume.
in: body
required: true
type: string
volume_id_4:
description: |
The UUID of the volume from which the backup was
created.
in: body
required: true
type: string
volume_id_5:
description: |
If the snapshot was created from a volume, the
volume ID.
in: body
required: true
type: string
volume_id_6:
description: |
The UUID of the volume to which you want to
restore a backup.
in: body
required: false
type: string
volume_image_metadata:
description: |
List of image metadata entries. Only included for volumes that were
created from an image, or from a snapshot of a volume originally created
from an image.
in: body
required: false
type: object
volume_name:
description: |
The volume name.
in: body
required: true
type: string
volume_name_optional:
description: |
The volume name.
in: body
required: false
type: string
volume_type_1:
description: |
A ``volume_type`` object.
in: body
required: true
type: object
volume_type_2:
description: |
The volume type (either name or ID). To create an environment with
multiple-storage back ends, you must specify a volume type. Block
Storage volume back ends are spawned as children to
``cinder-volume``, and they are keyed from a unique queue. They are
named ``cinder-volume.HOST.BACKEND``. For example,
``cinder-volume.ubuntu.lvmdriver``. When a volume is created, the
scheduler chooses an appropriate back end to handle the request based
on the volume type. Default is ``None``. For information about how to
use volume types to create multiple-storage back ends, see
`Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
in: body
required: false
type: string
volume_type_3:
description: |
The volume type. In an environment with multiple-storage
back ends, the scheduler determines where to send the
volume based on the volume type. For information about how to use
volume types to create multiple-storage back ends, see `Configure
multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
in: body
required: true
type: string
volume_type_4:
description: |
The associated volume type.
in: body
required: false
type: string
volume_type_5:
description: |
A list of ``volume_type`` objects.
in: body
required: true
type: array
volume_type_id_body:
description: |
The UUID of the volume type.
in: body
required: true
type: string
volume_types:
description: |
The list of volume types. In an environment with
multiple-storage back ends, the scheduler determines where to send
the volume based on the volume type. For information about how to
use volume types to create multiple-storage back ends, see
`Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
in: body
required: true
type: array
volume_types_2:
description: |
The list of volume types separated by commas. In an environment with
multiple-storage back ends, the scheduler determines where to send
the volume based on the volume type. For information about how to
use volume types to create multiple-storage back ends, see
`Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
in: body
required: true
type: string
volumes:
description: |
A list of ``volume`` objects.
in: body
required: true
type: array
volumes_number:
description: |
The number of volumes that are allowed for each project.
in: body
required: true
type: integer
volumes_number_for_type:
description: |
The number of volumes that are allowed for each project and
the specified volume type.
in: body
required: true
type: integer
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/qos-specs-v2-qos-specs.inc 0000664 0000000 0000000 00000013506 15131732575 0026417 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Quality of service (QoS) specifications (qos-specs)
===================================================
Administrators only.
Creates, lists, shows details for, associates, disassociates, sets
keys, unsets keys, and deletes quality of service (QoS)
specifications.
Disassociate QoS specification from all associations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/disassociate_all
Disassociates a QoS specification from all associations.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
Unset keys in QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/qos-specs/{qos_id}/delete_keys
Unsets keys in a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- keys: keys
- project_id: project_id_path
- qos_id: qos_id
Request Example
---------------
.. literalinclude:: ./samples/qos-unset-request.json
:language: javascript
Response Example
----------------
There is no body content for the response of a successful PUT operation.
Get all associations for QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/associations
Lists all associations for a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
Response Example
----------------
.. literalinclude:: ./samples/qos-show-response.json
:language: javascript
Associate QoS specification with volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/associate
Associates a QoS specification with a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- vol_type_id: vol_type_id
Disassociate QoS specification from volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/disassociate
Disassociates a QoS specification from a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- vol_type_id: vol_type_id
Show QoS specification details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}
Shows details for a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
- 405
- 413
- 503
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- name: name_4
- links: links_2
- id: id_3
- qos_specs: qos_specs
- consumer: consumer
- specs: specs
Response Example
----------------
.. literalinclude:: ./samples/qos-show-response.json
:language: javascript
Set keys in QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/qos-specs/{qos_id}
Sets keys in a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- qos_specs: qos_specs
- specs: specs
- project_id: project_id_path
- qos_id: qos_id
Request Example
---------------
.. literalinclude:: ./samples/qos-update-request.json
:language: javascript
Response Example
----------------
.. literalinclude:: ./samples/qos-update-response.json
:language: javascript
Delete QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/qos-specs/{qos_id}
Deletes a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- force: force
Create QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/qos-specs
Creates a QoS specification.
Specify one or more key and value pairs in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- qos_specs: qos_specs
- consumer: consumer
- name: name_4
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/qos-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- name: name_4
- links: links_2
- id: id_3
- qos_specs: qos_specs
- consumer: consumer
- specs: specs
List QoS specs
~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/qos-specs
Lists quality of service (QoS) specifications.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- specs: specs
- qos_specs: qos_specs
- consumer: consumer
- id: id_3
- name: name_4
Response Example
----------------
.. literalinclude:: ./samples/qos-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/quota-classes.inc 0000664 0000000 0000000 00000004745 15131732575 0025033 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Quota class set extension (os-quota-class-sets)
===============================================
Administrators only, depending on policy settings.
Shows and updates quota classes for a project.
Show quota classes
~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v2/{admin_project_id}/os-quota-class-sets/{quota_class_name}
Shows quota class set for a project. If no specific value for the quota class
resource exists, then the default value will be reported.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- quota_class_name: quota_class_name
- admin_project_id: admin_project_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup_gigabytes: maxTotalBackupGigabytes
- backups: maxTotalBackups
- gigabytes: maxTotalVolumeGigabytes
- groups: maxTotalGroups
- per_volume_gigabytes: perVolumeGigabytes
- snapshots: maxTotalSnapshots
- volumes: maxTotalVolumes
- id: quota_class_id
Response Example
----------------
.. literalinclude:: ./samples/quota-classes-show-response.json
:language: javascript
Update quota classes
~~~~~~~~~~~~~~~~~~~~
.. rest_method::
PUT /v2/{admin_project_id}/os-quota-class-sets/{quota_class_name}
Updates quota class set for a project. If the ``quota_class_name`` key does not
exist, then the API will create one.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- quota_class_name: quota_class_name
- gigabytes: maxTotalVolumeGigabytesOptional
- snapshots: maxTotalSnapshotsOptional
- volumes: maxTotalVolumesOptional
- volume-type: volume_type
Request Example
---------------
.. literalinclude:: ./samples/quota-classes-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup_gigabytes: maxTotalBackupGigabytes
- backups: maxTotalBackups
- gigabytes: maxTotalVolumeGigabytes
- groups: maxTotalGroups
- per_volume_gigabytes: perVolumeGigabytes
- snapshots: maxTotalSnapshots
- volumes: maxTotalVolumes
Response Example
----------------
.. literalinclude:: ./samples/quota-classes-update-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/quota-sets.inc 0000664 0000000 0000000 00000010167 15131732575 0024347 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Quota sets extension (os-quota-sets)
====================================
Administrators only, depending on policy settings.
Shows, updates, and deletes quotas for a project.
Show quotas
~~~~~~~~~~~
.. rest_method:: GET /v2/{admin_project_id}/os-quota-sets/{project_id}
Shows quotas for a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: quotas_project_id
- admin_project_id: admin_project_id
- usage: usage
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_set: quota_set
- id: project_id
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Response Example
----------------
.. literalinclude:: ./samples/quotas-show-response.json
:language: javascript
Update quotas
~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{admin_project_id}/os-quota-sets/{project_id}
Updates quotas for a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- project_id: quotas_project_id
- quota_set: quota_set
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Request Example
---------------
.. literalinclude:: ./samples/quotas-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_set: quota_set
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Response Example
----------------
.. literalinclude:: ./samples/quotas-update-response.json
:language: javascript
Delete quotas
~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{admin_project_id}/os-quota-sets/{project_id}
Deletes quotas for a project so the quotas revert to default values.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: quotas_project_id
- admin_project_id: admin_project_id
Response Example
----------------
There is no body content for the response of a successful DELETE operation.
Get default quotas
~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v2/{admin_project_id}/os-quota-sets/{project_id}/defaults
Gets default quotas for a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: quotas_project_id
- admin_project_id: admin_project_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_set: quota_set
- id: project_id
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Response Example
----------------
.. literalinclude:: ./samples/quotas-show-defaults-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/ 0000775 0000000 0000000 00000000000 15131732575 0023206 5 ustar 00root root 0000000 0000000 availability-zone-list-response.json 0000664 0000000 0000000 00000000171 15131732575 0032251 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"availabilityZoneInfo": [{
"zoneState": {
"available": true
},
"zoneName": "nova"
}]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backend-capabilities-response.json0000664 0000000 0000000 00000002042 15131732575 0031751 0 ustar 00root root 0000000 0000000 {
"namespace": "OS::Storage::Capabilities::fake",
"vendor_name": "OpenStack",
"volume_backend_name": "lvmdriver-1",
"pool_name": "pool",
"driver_version": "2.0.0",
"storage_protocol": "iSCSI",
"display_name": "Capabilities of Cinder LVM driver",
"description": "These are volume type options provided by Cinder LVM driver, blah, blah.",
"visibility": "public",
"replication_targets": [],
"properties": {
"compression": {
"title": "Compression",
"description": "Enables compression.",
"type": "boolean"
},
"qos": {
"title": "QoS",
"description": "Enables QoS.",
"type": "boolean"
},
"replication": {
"title": "Replication",
"description": "Enables replication.",
"type": "boolean"
},
"thin_provisioning": {
"title": "Thin Provisioning",
"description": "Sets thin provisioning.",
"type": "boolean"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-create-request.json 0000664 0000000 0000000 00000000310 15131732575 0030267 0 ustar 00root root 0000000 0000000 {
"backup": {
"container": null,
"description": null,
"name": "backup001",
"volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607",
"incremental": true
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-create-response.json 0000664 0000000 0000000 00000001004 15131732575 0030436 0 ustar 00root root 0000000 0000000 {
"backup": {
"id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"links": [
{
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"rel": "bookmark"
}
],
"name": "backup001"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-force-delete-request.json 0000664 0000000 0000000 00000000036 15131732575 0031367 0 ustar 00root root 0000000 0000000 {
"os-force_delete": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-record-export-response.json0000664 0000000 0000000 00000000173 15131732575 0031776 0 ustar 00root root 0000000 0000000 {
"backup-record": {
"backup_service": "cinder.backup.drivers.swift",
"backup_url": "eyJzdGF0"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-record-import-request.json 0000664 0000000 0000000 00000000173 15131732575 0031621 0 ustar 00root root 0000000 0000000 {
"backup-record": {
"backup_service": "cinder.backup.drivers.swift",
"backup_url": "eyJzdGF0"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-record-import-response.json0000664 0000000 0000000 00000000775 15131732575 0031777 0 ustar 00root root 0000000 0000000 {
"backup": {
"id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"links": [
{
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"rel": "bookmark"
}
],
"name": null
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-reset-status-request.json 0000664 0000000 0000000 00000000101 15131732575 0031465 0 ustar 00root root 0000000 0000000 {
"os-reset_status": {
"status": "available"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-restore-request.json 0000664 0000000 0000000 00000000161 15131732575 0030513 0 ustar 00root root 0000000 0000000 {
"restore": {
"name": "vol-01",
"volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-restore-response.json 0000664 0000000 0000000 00000000224 15131732575 0030661 0 ustar 00root root 0000000 0000000 {
"restore": {
"backup_id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"volume_id": "795114e8-7489-40be-a978-83797f2c1dd3"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backup-show-response.json 0000664 0000000 0000000 00000001617 15131732575 0030165 0 ustar 00root root 0000000 0000000 {
"backup": {
"availability_zone": "az1",
"container": "volumebackups",
"created_at": "2013-04-02T10:35:27.000000",
"description": null,
"fail_reason": null,
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 22,
"size": 1,
"status": "available",
"volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
"is_incremental": true,
"has_dependent_backups": false
}
}
backups-list-detailed-response.json 0000664 0000000 0000000 00000003746 15131732575 0032042 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"backups": [
{
"availability_zone": "az1",
"container": "volumebackups",
"created_at": "2013-04-02T10:35:27.000000",
"description": null,
"fail_reason": null,
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 22,
"size": 1,
"status": "available",
"volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
"is_incremental": true,
"has_dependent_backups": false
},
{
"availability_zone": "az1",
"container": "volumebackups",
"created_at": "2013-04-02T10:21:48.000000",
"description": null,
"fail_reason": null,
"id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"links": [
{
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "bookmark"
}
],
"name": "backup002",
"object_count": 22,
"size": 1,
"status": "available",
"volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
"is_incremental": true,
"has_dependent_backups": false
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/backups-list-response.json 0000664 0000000 0000000 00000002170 15131732575 0030336 0 ustar 00root root 0000000 0000000 {
"backups": [
{
"id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"links": [
{
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65",
"rel": "bookmark"
}
],
"name": "backup001"
},
{
"id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"links": [
{
"href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8",
"rel": "bookmark"
}
],
"name": "backup002"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/cgsnapshots-create-request.json 0000664 0000000 0000000 00000000510 15131732575 0031360 0 ustar 00root root 0000000 0000000 {
"cgsnapshot": {
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814546",
"name": "firstcg",
"description": "first consistency group",
"user_id": "6f519a48-3183-46cf-a32f-41815f814444",
"project_id": "6f519a48-3183-46cf-a32f-41815f815555",
"status": "creating"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/cgsnapshots-create-response.json 0000664 0000000 0000000 00000000156 15131732575 0031534 0 ustar 00root root 0000000 0000000 {
"cgsnapshot": {
"id": "6f519a48-3183-46cf-a32f-41815f816666",
"name": "firstcg"
}
}
cgsnapshots-list-detailed-response.json 0000664 0000000 0000000 00000001250 15131732575 0032732 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"cgsnapshots": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "available",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"consistencygroup_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd",
"status": "error",
"created_at": "2015-09-16T09:31:15.000000",
"name": "my-cg2",
"description": "Edited description"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/cgsnapshots-list-response.json 0000664 0000000 0000000 00000000366 15131732575 0031247 0 ustar 00root root 0000000 0000000 {
"cgsnapshots": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"name": "my-cg1"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"name": "my-cg2"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/cgsnapshots-show-response.json 0000664 0000000 0000000 00000000474 15131732575 0031254 0 ustar 00root root 0000000 0000000 {
"cgsnapshot": {
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "available",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group"
}
}
consistency-group-create-from-src-request.json 0000664 0000000 0000000 00000000620 15131732575 0034170 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroup-from-src": {
"name": "firstcg",
"description": "first consistency group",
"cgsnapshot_id": "6f519a48-3183-46cf-a32f-41815f813986",
"source_cgid": "6f519a48-3183-46cf-a32f-41815f814546",
"user_id": "6f519a48-3183-46cf-a32f-41815f815555",
"project_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "creating"
}
}
consistency-group-create-request.json 0000664 0000000 0000000 00000000273 15131732575 0032446 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroup": {
"name": "firstcg",
"description": "first consistency group",
"volume_types": "type1,type2",
"availability_zone": "az0"
}
}
consistency-group-create-response.json 0000664 0000000 0000000 00000000505 15131732575 0032612 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroup": {
"status": "error",
"description": "first consistency group",
"availability_zone": "az0",
"created_at": "2016-08-19T19:32:19.000000",
"volume_types": ["type1", "type2"],
"id": "63d1a274-de38-4384-a97e-475306777027",
"name": "firstcg"
}
}
consistency-group-delete-request.json 0000664 0000000 0000000 00000000073 15131732575 0032443 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroup": {
"force": false
}
}
consistency-group-show-response.json 0000664 0000000 0000000 00000000531 15131732575 0032326 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroup": {
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"status": "available",
"availability_zone": "az1",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group",
"volume_types": [
"123456"
]
}
}
consistency-group-update-request.json 0000664 0000000 0000000 00000000332 15131732575 0032461 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroup": {
"name": "my_cg",
"description": "My consistency group",
"add_volumes": "volume-uuid-1,volume-uuid-2",
"remove_volumes": "volume-uuid-8,volume-uuid-9"
}
}
consistency-groups-list-detailed-response.json 0000664 0000000 0000000 00000001364 15131732575 0034262 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroups": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"status": "available",
"availability_zone": "az1",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group",
"volume_types": [
"123456"
]
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"status": "error",
"availability_zone": "az2",
"created_at": "2015-09-16T09:31:15.000000",
"name": "my-cg2",
"description": "Edited description",
"volume_types": [
"234567"
]
}
]
}
consistency-groups-list-response.json 0000664 0000000 0000000 00000000374 15131732575 0032511 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"consistencygroups": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"name": "my-cg1"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"name": "my-cg2"
}
]
}
encryption-type-create-request.json 0000664 0000000 0000000 00000000236 15131732575 0032123 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"encryption":{
"key_size": 256,
"provider": "luks",
"control_location":"front-end",
"cipher": "aes-xts-plain64"
}
}
encryption-type-create-response.json 0000664 0000000 0000000 00000000443 15131732575 0032271 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"encryption": {
"volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577",
"control_location": "front-end",
"encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74",
"key_size": 256,
"provider": "luks",
"cipher": "aes-xts-plain64"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/encryption-type-show-response.json0000664 0000000 0000000 00000000547 15131732575 0032072 0 ustar 00root root 0000000 0000000 {
"volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577",
"control_location": "front-end",
"deleted": false,
"created_at": "2016-12-28T02:32:25.000000",
"updated_at": null,
"encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74",
"key_size": 256,
"provider": "luks",
"deleted_at": null,
"cipher": "aes-xts-plain64"
}
encryption-type-update-request.json 0000664 0000000 0000000 00000000167 15131732575 0032145 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"encryption":{
"key_size": 64,
"provider": "luks",
"control_location":"back-end"
}
}
encryption-type-update-response.json 0000664 0000000 0000000 00000000167 15131732575 0032313 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"encryption":{
"key_size": 64,
"provider": "luks",
"control_location":"back-end"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/extensions-list-response.json 0000664 0000000 0000000 00000021053 15131732575 0031106 0 ustar 00root root 0000000 0000000 {
"extensions": [
{
"updated": "2013-04-18T00:00:00+00:00",
"name": "SchedulerHints",
"links": [],
"namespace": "https://docs.openstack.org/block-service/ext/scheduler-hints/api/v2",
"alias": "OS-SCH-HNT",
"description": "Pass arbitrary key/value pairs to the scheduler."
},
{
"updated": "2011-06-29T00:00:00+00:00",
"name": "Hosts",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/hosts/api/v1.1",
"alias": "os-hosts",
"description": "Admin-only host administration."
},
{
"updated": "2011-11-03T00:00:00+00:00",
"name": "VolumeTenantAttribute",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume_tenant_attribute/api/v1",
"alias": "os-vol-tenant-attr",
"description": "Expose the internal project_id as an attribute of a volume."
},
{
"updated": "2011-08-08T00:00:00+00:00",
"name": "Quotas",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/quotas-sets/api/v1.1",
"alias": "os-quota-sets",
"description": "Quota management support."
},
{
"updated": "2011-08-24T00:00:00+00:00",
"name": "TypesManage",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/types-manage/api/v1",
"alias": "os-types-manage",
"description": "Types manage support."
},
{
"updated": "2013-07-10T00:00:00+00:00",
"name": "VolumeEncryptionMetadata",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/os-volume-encryption-metadata/api/v1",
"alias": "os-volume-encryption-metadata",
"description": "Volume encryption metadata retrieval support."
},
{
"updated": "2012-12-12T00:00:00+00:00",
"name": "Backups",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/backups/api/v1",
"alias": "backups",
"description": "Backups support."
},
{
"updated": "2013-07-16T00:00:00+00:00",
"name": "SnapshotActions",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1",
"alias": "os-snapshot-actions",
"description": "Enable snapshot manager actions."
},
{
"updated": "2012-05-31T00:00:00+00:00",
"name": "VolumeActions",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume-actions/api/v1.1",
"alias": "os-volume-actions",
"description": "Enable volume actions\n "
},
{
"updated": "2013-10-03T00:00:00+00:00",
"name": "UsedLimits",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/used-limits/api/v1.1",
"alias": "os-used-limits",
"description": "Provide data on limited resources that are being used."
},
{
"updated": "2012-05-31T00:00:00+00:00",
"name": "VolumeUnmanage",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume-unmanage/api/v1.1",
"alias": "os-volume-unmanage",
"description": "Enable volume unmanage operation."
},
{
"updated": "2011-11-03T00:00:00+00:00",
"name": "VolumeHostAttribute",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume_host_attribute/api/v1",
"alias": "os-vol-host-attr",
"description": "Expose host as an attribute of a volume."
},
{
"updated": "2013-07-01T00:00:00+00:00",
"name": "VolumeTypeEncryption",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume-type-encryption/api/v1",
"alias": "encryption",
"description": "Encryption support for volume types."
},
{
"updated": "2013-06-27T00:00:00+00:00",
"name": "AvailabilityZones",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/os-availability-zone/api/v1",
"alias": "os-availability-zone",
"description": "Describe Availability Zones."
},
{
"updated": "2013-08-02T00:00:00+00:00",
"name": "Qos_specs_manage",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/qos-specs/api/v1",
"alias": "qos-specs",
"description": "QoS specs support."
},
{
"updated": "2011-08-24T00:00:00+00:00",
"name": "TypesExtraSpecs",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/types-extra-specs/api/v1",
"alias": "os-types-extra-specs",
"description": "Type extra specs support."
},
{
"updated": "2013-08-08T00:00:00+00:00",
"name": "VolumeMigStatusAttribute",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume_mig_status_attribute/api/v1",
"alias": "os-vol-mig-status-attr",
"description": "Expose migration_status as an attribute of a volume."
},
{
"updated": "2012-08-13T00:00:00+00:00",
"name": "CreateVolumeExtension",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/image-create/api/v1",
"alias": "os-image-create",
"description": "Allow creating a volume from an image in the Create Volume API."
},
{
"updated": "2014-01-10T00:00:00-00:00",
"name": "ExtendedServices",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/extended_services/api/v2",
"alias": "os-extended-services",
"description": "Extended services support."
},
{
"updated": "2012-06-19T00:00:00+00:00",
"name": "ExtendedSnapshotAttributes",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/extended_snapshot_attributes/api/v1",
"alias": "os-extended-snapshot-attributes",
"description": "Extended SnapshotAttributes support."
},
{
"updated": "2012-12-07T00:00:00+00:00",
"name": "VolumeImageMetadata",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume_image_metadata/api/v1",
"alias": "os-vol-image-meta",
"description": "Show image metadata associated with the volume."
},
{
"updated": "2012-03-12T00:00:00+00:00",
"name": "QuotaClasses",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/quota-classes-sets/api/v1.1",
"alias": "os-quota-class-sets",
"description": "Quota classes management support."
},
{
"updated": "2013-05-29T00:00:00+00:00",
"name": "VolumeTransfer",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/volume-transfer/api/v1.1",
"alias": "os-volume-transfer",
"description": "Volume transfer management support."
},
{
"updated": "2014-02-10T00:00:00+00:00",
"name": "VolumeManage",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/os-volume-manage/api/v1",
"alias": "os-volume-manage",
"description": "Allows existing backend storage to be 'managed' by Cinder."
},
{
"updated": "2012-08-25T00:00:00+00:00",
"name": "AdminActions",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/admin-actions/api/v1.1",
"alias": "os-admin-actions",
"description": "Enable admin actions."
},
{
"updated": "2012-10-28T00:00:00-00:00",
"name": "Services",
"links": [],
"namespace": "https://docs.openstack.org/volume/ext/services/api/v2",
"alias": "os-services",
"description": "Services support."
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/host-attach-request.json 0000664 0000000 0000000 00000000074 15131732575 0030007 0 ustar 00root root 0000000 0000000 {
"os-attach": {
"host_name": "my_host"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/hosts-get-response.json 0000664 0000000 0000000 00000001166 15131732575 0027656 0 ustar 00root root 0000000 0000000 {
"host": [{
"resource": {
"volume_count": "8",
"total_volume_gb": "11",
"total_snapshot_gb": "1",
"project": "(total)",
"host": "node1@rbd-sas",
"snapshot_count": "1"
}
},
{
"resource": {
"volume_count": "8",
"total_volume_gb": "11",
"total_snapshot_gb": "1",
"project": "f21a9c86d7114bf99c711f4874d80474",
"host": "node1@rbd-sas",
"snapshot_count": "1"
}
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/hosts-list-response.json 0000664 0000000 0000000 00000001307 15131732575 0030047 0 ustar 00root root 0000000 0000000 {
"hosts": [{
"service-status": "available",
"service": "cinder-backup",
"zone": "nova",
"service-state": "enabled",
"host_name": "node1",
"last-update": "2017-03-09T21:38:41.000000"
},
{
"service-status": "available",
"service": "cinder-scheduler",
"zone": "nova",
"service-state": "enabled",
"host_name": "node1",
"last-update": "2017-03-09T21:38:38.000000"
},
{
"service-status": "available",
"service": "cinder-volume",
"zone": "nova",
"service-state": "enabled",
"host_name": "node1@lvm",
"last-update": "2017-03-09T21:38:35.000000"
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/image-metadata-show-request.json 0000664 0000000 0000000 00000000045 15131732575 0031404 0 ustar 00root root 0000000 0000000 {
"os-show_image_metadata": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/image-metadata-show-response.json 0000664 0000000 0000000 00000000116 15131732575 0031551 0 ustar 00root root 0000000 0000000 {
"metadata": {
"key1": "value1",
"key2": "value2"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/limits-show-response.json 0000664 0000000 0000000 00000000715 15131732575 0030217 0 ustar 00root root 0000000 0000000 {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/pools-list-detailed-response.json 0000664 0000000 0000000 00000001627 15131732575 0031621 0 ustar 00root root 0000000 0000000 {
"pools": [
{
"name": "pool1",
"capabilities": {
"updated": "2014-10-28T00:00:00-00:00",
"total_capacity_gb": 1024,
"free_capacity_gb": 100,
"volume_backend_name": "pool1",
"reserved_percentage": 0,
"driver_version": "1.0.0",
"storage_protocol": "iSCSI",
"QoS_support": false
}
},
{
"name": "pool2",
"capabilities": {
"updated": "2014-10-28T00:00:00-00:00",
"total_capacity_gb": 512,
"free_capacity_gb": 200,
"volume_backend_name": "pool2",
"reserved_percentage": 0,
"driver_version": "1.0.1",
"storage_protocol": "iSER",
"QoS_support": true
}
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos-create-request.json 0000664 0000000 0000000 00000000200 15131732575 0027622 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"availability": "100",
"name": "reliability-spec",
"numberOfFailures": "0"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos-create-response.json 0000664 0000000 0000000 00000001171 15131732575 0030000 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"specs": {
"numberOfFailures": "0",
"availability": "100"
},
"consumer": "back-end",
"name": "reliability-spec",
"id": "599ef437-1c99-42ec-9fc6-239d0519fef1"
},
"links": [
{
"href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1",
"rel": "self"
},
{
"href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1",
"rel": "bookmark"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos-list-response.json 0000664 0000000 0000000 00000001055 15131732575 0027511 0 ustar 00root root 0000000 0000000 {
"qos_specs": [
{
"specs": {
"availability": "100",
"numberOfFailures": "0"
},
"consumer": "back-end",
"name": "reliability-spec",
"id": "0388d6c6-d5d4-42a3-b289-95205c50dd15"
},
{
"specs": {
"delay": "0",
"throughput": "100"
},
"consumer": "back-end",
"name": "performance-spec",
"id": "ecfc6e2e-7117-44a4-8eec-f84d04f531a8"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos-show-response.json 0000664 0000000 0000000 00000001171 15131732575 0027515 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"specs": {
"availability": "100",
"numberOfFailures": "0"
},
"consumer": "back-end",
"name": "reliability-spec",
"id": "0388d6c6-d5d4-42a3-b289-95205c50dd15"
},
"links": [
{
"href": "http://23.253.228.211:8776/v2/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15",
"rel": "self"
},
{
"href": "http://23.253.228.211:8776/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15",
"rel": "bookmark"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos-unset-request.json 0000664 0000000 0000000 00000000047 15131732575 0027526 0 ustar 00root root 0000000 0000000 {
"keys": [
"key1"
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos-update-request.json 0000664 0000000 0000000 00000000062 15131732575 0027647 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"delay": "1"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos-update-response.json 0000664 0000000 0000000 00000000062 15131732575 0030015 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"delay": "1"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/qos_show_response.json 0000664 0000000 0000000 00000000310 15131732575 0027653 0 ustar 00root root 0000000 0000000 {
"qos_associations": [
{
"association_type": "volume_type",
"name": "reliability-type",
"id": "a12983c2-83bd-4afa-be9f-ad796573ead6"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quota-classes-show-response.json 0000664 0000000 0000000 00000000465 15131732575 0031504 0 ustar 00root root 0000000 0000000 {
"quota_class_set": {
"per_volume_gigabytes": -1,
"volumes_lvmdriver-1": -1,
"groups": 10,
"gigabytes": 1000,
"backup_gigabytes": 1000,
"snapshots": 10,
"gigabytes_lvmdriver-1": -1,
"volumes": 10,
"snapshots_lvmdriver-1": -1,
"backups": 10,
"id": "default"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quota-classes-update-request.json 0000664 0000000 0000000 00000000150 15131732575 0031627 0 ustar 00root root 0000000 0000000 {
"quota_class_set": {
"volumes_lmv": 10,
"gigabytes_lmv": 1000,
"snapshots_lmv": 10
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quota-classes-update-response.json0000664 0000000 0000000 00000000440 15131732575 0031777 0 ustar 00root root 0000000 0000000 {
"quota_class_set": {
"per_volume_gigabytes": -1,
"volumes_lvmdriver-1": -1,
"groups": 10,
"gigabytes": 1000,
"backup_gigabytes": 1000,
"snapshots": 10,
"gigabytes_lvmdriver-1": -1,
"volumes": 10,
"snapshots_lvmdriver-1": -1,
"backups": 10
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quotas-show-defaults-response.json0000664 0000000 0000000 00000000144 15131732575 0032033 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"gigabytes": 5,
"snapshots": 10,
"volumes": 20
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quotas-show-response.json 0000664 0000000 0000000 00000000144 15131732575 0030226 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"gigabytes": 5,
"snapshots": 10,
"volumes": 20
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quotas-update-request.json 0000664 0000000 0000000 00000000065 15131732575 0030364 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"snapshots": 45
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quotas-update-response.json 0000664 0000000 0000000 00000000065 15131732575 0030532 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"snapshots": 45
}
}
quotas-user-show-detailed-response.json 0000664 0000000 0000000 00000000547 15131732575 0032703 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"quota_set": {
"gigabytes": {
"in_use": 100,
"limit": -1,
"reserved": 0
},
"snapshots": {
"in_use": 12,
"limit": -1,
"reserved": 0
},
"volumes": {
"in_use": 1,
"limit": -1,
"reserved": 0
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/quotas-user-show-response.json 0000664 0000000 0000000 00000000144 15131732575 0031202 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"gigabytes": 5,
"snapshots": 10,
"volumes": 20
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/snapshot-create-request.json 0000664 0000000 0000000 00000000262 15131732575 0030667 0 ustar 00root root 0000000 0000000 {
"snapshot": {
"name": "snap-001",
"description": "Daily backup",
"volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
"force": true
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/snapshot-create-response.json 0000664 0000000 0000000 00000000516 15131732575 0031037 0 ustar 00root root 0000000 0000000 {
"snapshot": {
"status": "creating",
"description": "Daily backup",
"created_at": "2013-02-25T03:56:53.081642",
"metadata": {},
"volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
"size": 1,
"id": "ffa9bc5e-1172-4021-acaf-cdcd78a9584d",
"name": "snap-001"
}
}
snapshot-metadata-create-request.json 0000664 0000000 0000000 00000000060 15131732575 0032362 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"key": "v2"
}
}
snapshot-metadata-create-response.json 0000664 0000000 0000000 00000000060 15131732575 0032530 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"key": "v2"
}
}
snapshot-metadata-show-response.json 0000664 0000000 0000000 00000000063 15131732575 0032250 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"name": "test"
}
}
snapshot-metadata-update-request.json 0000664 0000000 0000000 00000000060 15131732575 0032401 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"key": "v2"
}
}
snapshot-metadata-update-response.json 0000664 0000000 0000000 00000000060 15131732575 0032547 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"key": "v2"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/snapshot-show-response.json 0000664 0000000 0000000 00000001023 15131732575 0030546 0 ustar 00root root 0000000 0000000 {
"snapshot": {
"status": "available",
"os-extended-snapshot-attributes:progress": "100%",
"description": "Daily backup",
"created_at": "2013-02-25T04:13:17.000000",
"metadata": {},
"volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
"os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde",
"size": 1,
"id": "2bb856e1-b3d8-4432-a858-09e4ce939389",
"name": "snap-001",
"updated_at": "2013-03-11T07:24:57Z"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/snapshot-status-reset-request.json0000664 0000000 0000000 00000000101 15131732575 0032057 0 ustar 00root root 0000000 0000000 {
"os-reset_status": {
"status": "available"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/snapshot-update-request.json 0000664 0000000 0000000 00000000160 15131732575 0030703 0 ustar 00root root 0000000 0000000 {
"snapshot": {
"name": "snap-002",
"description": "This is yet, another snapshot."
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/snapshot-update-response.json 0000664 0000000 0000000 00000000540 15131732575 0031053 0 ustar 00root root 0000000 0000000 {
"snapshot": {
"created_at": "2013-02-20T08:11:34.000000",
"description": "This is yet, another snapshot",
"name": "snap-002",
"id": "4b502fcb-1f26-45f8-9fe5-3b9a0a52eaf2",
"size": 1,
"status": "available",
"metadata": {},
"volume_id": "2402b902-0b7a-458c-9c07-7435a826f794"
}
}
snapshots-list-detailed-response.json 0000664 0000000 0000000 00000001561 15131732575 0032425 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"snapshots": [
{
"status": "available",
"metadata": {
"name": "test"
},
"os-extended-snapshot-attributes:progress": "100%",
"name": "test-volume-snapshot",
"volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506",
"os-extended-snapshot-attributes:project_id": "bab7d5c60cd041a0a36f7c4b6e1dd978",
"created_at": "2015-11-29T02:25:51.000000",
"size": 1,
"id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c",
"description": "volume snapshot",
"updated_at": "2015-12-11T07:24:57Z"
}
],
"snapshots_links": [
{
"href": "https://10.43.176.164:8776/v3/d55fb90e300b436cb2714a17137be023/snapshots?limit=1&marker=2e0cd28e-d7a2-4cdb-87e6-cd37c417c06d",
"rel": "next"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/snapshots-list-response.json 0000664 0000000 0000000 00000001323 15131732575 0030727 0 ustar 00root root 0000000 0000000 {
"snapshots": [
{
"status": "available",
"metadata": {
"name": "test"
},
"name": "test-volume-snapshot",
"volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506",
"created_at": "2015-11-29T02:25:51.000000",
"size": 1,
"id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c",
"description": "volume snapshot",
"updated_at": "2015-12-11T07:24:57Z"
}
],
"snapshots_links": [
{
"href": "https://10.43.176.164:8776/v3/d55fb90e300b436cb2714a17137be023/snapshots?limit=1&marker=2e0cd28e-d7a2-4cdb-87e6-cd37c417c06d",
"rel": "next"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/user-quotas-show-response.json 0000664 0000000 0000000 00000000643 15131732575 0031206 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"cores": 20,
"fixed_ips": -1,
"floating_ips": 10,
"id": "fake_project",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
"instances": 10,
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
"security_group_rules": 20,
"security_groups": 10
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/user-quotas-update-request.json 0000664 0000000 0000000 00000000113 15131732575 0031332 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"force": true,
"instances": 9
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/user-quotas-update-response.json 0000664 0000000 0000000 00000000604 15131732575 0031505 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"cores": 20,
"floating_ips": 10,
"fixed_ips": -1,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
"instances": 9,
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
"security_group_rules": 20,
"security_groups": 10
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/version-show-response.json 0000664 0000000 0000000 00000001400 15131732575 0030373 0 ustar 00root root 0000000 0000000 {
"versions": [
{
"status": "DEPRECATED",
"updated": "2017-02-25T12:00:00Z",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=2"
}
],
"id": "v2.0",
"links": [
{
"href": "https://docs.openstack.org/",
"type": "text/html",
"rel": "describedby"
},
{
"href": "http://23.253.248.171:8776/v2/",
"rel": "self"
}
],
"min_version": "",
"version": ""
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/versions-response.json 0000664 0000000 0000000 00000002757 15131732575 0027620 0 ustar 00root root 0000000 0000000 {
"versions": [
{
"status": "SUPPORTED",
"updated": "2014-06-28T12:20:21Z",
"links": [
{
"href": "https://docs.openstack.org/",
"type": "text/html",
"rel": "describedby"
},
{
"href": "http://10.0.2.15:8776/v2/",
"rel": "self"
}
],
"min_version": "",
"version": "",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=1"
}
],
"id": "v2.0"
},
{
"status": "CURRENT",
"updated": "2016-02-08T12:20:21Z",
"links": [
{
"href": "https://docs.openstack.org/",
"type": "text/html",
"rel": "describedby"
},
{
"href": "http://10.0.2.15:8776/v3/",
"rel": "self"
}
],
"min_version": "3.0",
"version": "{Current_Max_Version}",
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=1"
}
],
"id": "v3.0"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-attach-request.json 0000664 0000000 0000000 00000000177 15131732575 0030345 0 ustar 00root root 0000000 0000000 {
"os-attach": {
"instance_uuid": "95D9EF50-507D-11E5-B970-0800200C9A66",
"mountpoint": "/dev/vdc"
}
}
volume-bootable-status-update-request.json 0000664 0000000 0000000 00000000076 15131732575 0033410 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"os-set_bootable": {
"bootable": "True"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-create-request.json 0000664 0000000 0000000 00000001003 15131732575 0030331 0 ustar 00root root 0000000 0000000 {
"volume": {
"size": 10,
"availability_zone": null,
"source_volid": null,
"description": null,
"multiattach": false,
"snapshot_id": null,
"name": null,
"imageRef": null,
"volume_type": null,
"metadata": {},
"consistencygroup_id": null
},
"OS-SCH-HNT:scheduler_hints": {
"same_host": [
"a0cf03a5-d921-4877-bb5c-86d26cf818e1",
"8c19174f-4220-44f0-824a-cd1eeef10287"
]
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-create-response.json 0000664 0000000 0000000 00000002162 15131732575 0030506 0 ustar 00root root 0000000 0000000 {
"volume": {
"status": "creating",
"migration_status": null,
"user_id": "0eea4eabcf184061a3b6db1e0daaf010",
"attachments": [],
"links": [
{
"href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38",
"rel": "self"
},
{
"href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38",
"rel": "bookmark"
}
],
"availability_zone": "nova",
"bootable": "false",
"encrypted": false,
"created_at": "2015-11-29T03:01:44.000000",
"description": null,
"updated_at": null,
"volume_type": "lvmdriver-1",
"name": "test-volume-attachments",
"replication_status": "disabled",
"consistencygroup_id": null,
"source_volid": null,
"snapshot_id": null,
"multiattach": false,
"metadata": {},
"id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38",
"size": 2
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-detach-request.json 0000664 0000000 0000000 00000000136 15131732575 0030324 0 ustar 00root root 0000000 0000000 {
"os-detach": {
"attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-extend-request.json 0000664 0000000 0000000 00000000063 15131732575 0030362 0 ustar 00root root 0000000 0000000 {
"os-extend": {
"new_size": 3
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-force-delete-request.json 0000664 0000000 0000000 00000000035 15131732575 0031430 0 ustar 00root root 0000000 0000000 {
"os-force_delete": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-force-detach-request.json 0000664 0000000 0000000 00000000270 15131732575 0031417 0 ustar 00root root 0000000 0000000 {
"os-force_detach": {
"attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1",
"connector": {
"initiator": "iqn.2012-07.org.fake:01"
}
}
}
volume-image-metadata-set-request.json 0000664 0000000 0000000 00000000422 15131732575 0032444 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"os-set_image_metadata": {
"metadata": {
"image_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
"image_name": "image",
"kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
"ramdisk_id": "somedisk"
}
}
}
volume-image-metadata-unset-request.json 0000664 0000000 0000000 00000000107 15131732575 0033007 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"os-unset_image_metadata": {
"key": "ramdisk_id"
}
}
volume-image-metadata-update-response.json 0000664 0000000 0000000 00000000636 15131732575 0033310 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"kernel_id": "6ff710d2-942b-4d6b-9168-8c9cc2404ab1",
"container_format": "bare",
"min_ram": "0",
"ramdisk_id": "somedisk",
"disk_format": "qcow2",
"image_name": "image",
"image_id": "5137a025-3c5f-43c1-bc64-5f41270040a5",
"checksum": "f8ab98ff5e73ebab884d80c9dc9c7290",
"min_disk": "0",
"size": "13267968"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-manage-request.json 0000664 0000000 0000000 00000000663 15131732575 0030331 0 ustar 00root root 0000000 0000000 {
"volume": {
"host": "geraint-VirtualBox",
"ref": {
"source-name": "existingLV",
"source-id": "1234"
},
"name": "New Volume",
"availability_zone": "az2",
"description": "Volume imported from existingLV",
"volume_type": null,
"bootable": true,
"metadata": {
"key1": "value1",
"key2": "value2"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-manage-response.json 0000664 0000000 0000000 00000002125 15131732575 0030472 0 ustar 00root root 0000000 0000000 {
"volume": {
"status": "creating",
"user_id": "eae1472b5fc5496998a3d06550929e7e",
"attachments": [],
"links": [
{
"href": "http://10.0.2.15:8776/v2/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c",
"rel": "self"
},
{
"href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c",
"rel": "bookmark"
}
],
"availability_zone": "az2",
"bootable": "false",
"encrypted": "false",
"created_at": "2014-07-18T00:12:54.000000",
"description": "Volume imported from existingLV",
"os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb",
"volume_type": null,
"name": "New Volume",
"source_volid": null,
"snapshot_id": null,
"metadata": {
"key2": "value2",
"key1": "value1"
},
"id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c",
"size": 0
}
}
volume-metadata-create-request.json 0000664 0000000 0000000 00000000070 15131732575 0032033 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"name": "metadata0"
}
}
volume-metadata-create-response.json 0000664 0000000 0000000 00000000070 15131732575 0032201 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"name": "metadata0"
}
}
volume-metadata-show-key-response.json 0000664 0000000 0000000 00000000057 15131732575 0032511 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"meta": {
"name": "test"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-metadata-show-response.json0000664 0000000 0000000 00000000027 15131732575 0031777 0 ustar 00root root 0000000 0000000 {
"metadata": {}
}
volume-metadata-update-key-request.json 0000664 0000000 0000000 00000000053 15131732575 0032641 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"meta": {
"name": "new_name"
}
}
volume-metadata-update-key-response.json 0000664 0000000 0000000 00000000053 15131732575 0033007 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"meta": {
"name": "new_name"
}
}
volume-metadata-update-request.json 0000664 0000000 0000000 00000000070 15131732575 0032052 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"name": "metadata1"
}
}
volume-metadata-update-response.json 0000664 0000000 0000000 00000000070 15131732575 0032220 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"metadata": {
"name": "metadata1"
}
}
volume-os-migrate_volume-request.json 0000664 0000000 0000000 00000000100 15131732575 0032442 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"os-migrate_volume": {
"host": "node1@lvm"
}
} volume-os-migrate_volume_completion-request.json 0000664 0000000 0000000 00000000205 15131732575 0034701 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"os-migrate_volume_completion": {
"new_volume": "2b955850-f177-45f7-9f49-ecb2c256d161",
"error": false
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-os-retype-request.json 0000664 0000000 0000000 00000000161 15131732575 0031021 0 ustar 00root root 0000000 0000000 {
"os-retype": {
"new_type": "dedup-tier-replicaton",
"migration_policy": "never"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-show-response.json 0000664 0000000 0000000 00000002014 15131732575 0030217 0 ustar 00root root 0000000 0000000 {
"volume": {
"status": "available",
"attachments": [],
"links": [
{
"href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635",
"rel": "self"
},
{
"href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635",
"rel": "bookmark"
}
],
"availability_zone": "nova",
"bootable": "false",
"os-vol-host-attr:host": "ip-10-168-107-25",
"source_volid": null,
"snapshot_id": null,
"id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
"description": "Super volume.",
"name": "vol-002",
"created_at": "2013-02-25T02:40:21.000000",
"volume_type": "None",
"os-vol-tenant-attr:tenant_id": "0c2eba2c5af04d3f9e9d0d410b371fde",
"size": 1,
"metadata": {
"contents": "not junk"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-status-reset-request.json 0000664 0000000 0000000 00000000217 15131732575 0031537 0 ustar 00root root 0000000 0000000 {
"os-reset_status": {
"status": "available",
"attach_status": "detached",
"migration_status": "migrating"
}
}
volume-transfer-accept-request.json 0000664 0000000 0000000 00000000101 15131732575 0032066 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"accept": {
"auth_key": "9266c59563c84664"
}
}
volume-transfer-accept-response.json 0000664 0000000 0000000 00000000727 15131732575 0032252 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"transfer": {
"id": "cac5c677-73a9-4288-bb9c-b2ebfb547377",
"name": "first volume transfer",
"volume_id": "894623a6-e901-4312-aa06-4275e6321cce",
"links": [
{
"href": "http://localhost/v2/firstproject/volumes/1",
"rel": "self"
},
{
"href": "http://localhost/firstproject/volumes/1",
"rel": "bookmark"
}
]
}
}
volume-transfer-create-request.json 0000664 0000000 0000000 00000000170 15131732575 0032100 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"transfer": {
"volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37",
"name": "first volume"
}
}
volume-transfer-create-response.json 0000664 0000000 0000000 00000001052 15131732575 0032246 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"transfer": {
"id": "1a7059f5-8ed7-45b7-8d05-2811e5d09f24",
"created_at": "2015-02-25T03:56:53.081642",
"name": "first volume",
"volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37",
"auth_key": "9266c59563c84664",
"links": [
{
"href": "http://localhost/v2/firstproject/volumes/3",
"rel": "self"
},
{
"href": "http://localhost/firstproject/volumes/3",
"rel": "bookmark"
}
]
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-transfer-show-response.json0000664 0000000 0000000 00000001013 15131732575 0032037 0 ustar 00root root 0000000 0000000 {
"transfer": {
"id": "cac5c677-73a9-4288-bb9c-b2ebfb547377",
"created_at": "2015-02-25T03:56:53.081642",
"name": "first volume transfer",
"volume_id": "894623a6-e901-4312-aa06-4275e6321cce",
"links": [
{
"href": "http://localhost/v2/firstproject/volumes/1",
"rel": "self"
},
{
"href": "http://localhost/firstproject/volumes/1",
"rel": "bookmark"
}
]
}
}
volume-transfers-list-detailed-response.json 0000664 0000000 0000000 00000002225 15131732575 0033715 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"transfers": [
{
"id": "cac5c677-73a9-4288-bb9c-b2ebfb547377",
"created_at": "2015-02-25T03:56:53.081642",
"name": "first volume transfer",
"volume_id": "894623a6-e901-4312-aa06-4275e6321cce",
"links": [
{
"href": "http://localhost/v2/firstproject/volumes/1",
"rel": "self"
},
{
"href": "http://localhost/firstproject/volumes/1",
"rel": "bookmark"
}
]
},
{
"id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1",
"created_at": "2015-03-25T03:56:53.081642",
"name": "second volume transfer",
"volume_id": "673db275-379f-41af-8371-e1652132b4c1",
"links": [
{
"href": "http://localhost/v2/firstproject/volumes/2",
"rel": "self"
},
{
"href": "http://localhost/firstproject/volumes/2",
"rel": "bookmark"
}
]
}
]
}
volume-transfers-list-response.json 0000664 0000000 0000000 00000002045 15131732575 0032144 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"transfers": [
{
"id": "cac5c677-73a9-4288-bb9c-b2ebfb547377",
"name": "first volume transfer",
"volume_id": "894623a6-e901-4312-aa06-4275e6321cce",
"links": [
{
"href": "http://localhost/v2/firstproject/volumes/1",
"rel": "self"
},
{
"href": "http://localhost/firstproject/volumes/1",
"rel": "bookmark"
}
]
},
{
"id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1",
"name": "second volume transfer",
"volume_id": "673db275-379f-41af-8371-e1652132b4c1",
"links": [
{
"href": "http://localhost/v2/firstproject/volumes/2",
"rel": "self"
},
{
"href": "http://localhost/firstproject/volumes/2",
"rel": "bookmark"
}
]
}
]
}
volume-type-access-add-request.json 0000664 0000000 0000000 00000000132 15131732575 0031757 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"addProjectAccess": {
"project": "f270b245cb11498ca4031deb7e141cfa"
}
}
volume-type-access-delete-request.json 0000664 0000000 0000000 00000000135 15131732575 0032474 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"removeProjectAccess": {
"project": "f270b245cb11498ca4031deb7e141cfa"
}
}
volume-type-access-list-response.json 0000664 0000000 0000000 00000000241 15131732575 0032351 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"volume_type_access": {
"volume_type_id": "3c67e124-39ad-4ace-a507-8bb7bf510c26",
"project_id": "f270b245cb11498ca4031deb7e141cfa"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-type-create-request.json 0000664 0000000 0000000 00000000340 15131732575 0031313 0 ustar 00root root 0000000 0000000 {
"volume_type": {
"name": "vol-type-001",
"description": "volume type 0001",
"os-volume-type-access:is_public": true,
"extra_specs": {
"capabilities": "gpu"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-type-show-request.json 0000664 0000000 0000000 00000000272 15131732575 0031034 0 ustar 00root root 0000000 0000000 {
"volume_type": {
"id": "289da7f8-6440-407c-9fb4-7db01ec49164",
"name": "vol-type-001",
"extra_specs": {
"capabilities": "gpu"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-type-show-response.json 0000664 0000000 0000000 00000000401 15131732575 0031174 0 ustar 00root root 0000000 0000000 {
"volume_type": {
"id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"name": "vol-type-001",
"description": "volume type 001",
"is_public": "true",
"extra_specs": {
"capabilities": "gpu"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-type-update-request.json 0000664 0000000 0000000 00000000312 15131732575 0031331 0 ustar 00root root 0000000 0000000 {
"volume_type": {
"name": "vol-type-001",
"description": "volume type 0001",
"is_public": true,
"extra_specs": {
"capabilities": "gpu"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-types-list-response.json 0000664 0000000 0000000 00000000543 15131732575 0031361 0 ustar 00root root 0000000 0000000 {
"volume_types": [
{
"extra_specs": {
"capabilities": "gpu"
},
"id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"name": "SSD"
},
{
"extra_specs": {},
"id": "8eb69a46-df97-4e41-9586-9a40a7533803",
"name": "SATA"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-unmanage-request.json 0000664 0000000 0000000 00000000032 15131732575 0030662 0 ustar 00root root 0000000 0000000 {
"os-unmanage": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-update-request.json 0000664 0000000 0000000 00000000153 15131732575 0030355 0 ustar 00root root 0000000 0000000 {
"volume": {
"name": "vol-003",
"description": "This is yet, another volume."
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volume-update-response.json 0000664 0000000 0000000 00000002237 15131732575 0030530 0 ustar 00root root 0000000 0000000 {
"volume": {
"status": "available",
"migration_status": null,
"user_id": "0eea4eabcf184061a3b6db1e0daaf010",
"attachments": [],
"links": [
{
"href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635",
"rel": "self"
},
{
"href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635",
"rel": "bookmark"
}
],
"availability_zone": "nova",
"bootable": "false",
"encrypted": false,
"created_at": "2015-11-29T03:01:44.000000",
"description": "This is yet, another volume.",
"updated_at": null,
"volume_type": "lvmdriver-1",
"name": "vol-003",
"replication_status": "disabled",
"consistencygroup_id": null,
"source_volid": null,
"snapshot_id": null,
"multiattach": false,
"metadata": {
"contents": "not junk"
},
"id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
"size": 1
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000010217 15131732575 0032073 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples {
"volumes": [
{
"migration_status": null,
"attachments": [
{
"server_id": "f4fda93b-06e0-4743-8117-bc8bcecd651b",
"attachment_id": "3b4db356-253d-4fab-bfa0-e3626c0b8405",
"host_name": null,
"volume_id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38",
"device": "/dev/vdb",
"id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38"
}
],
"links": [
{
"href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38",
"rel": "self"
},
{
"href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38",
"rel": "bookmark"
}
],
"availability_zone": "nova",
"os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1",
"encrypted": false,
"replication_status": "disabled",
"snapshot_id": null,
"id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38",
"size": 2,
"user_id": "32779452fcd34ae1a53a797ac8a1e064",
"os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978",
"os-vol-mig-status-attr:migstat": null,
"metadata": {
"readonly": false,
"attached_mode": "rw"
},
"status": "in-use",
"description": null,
"multiattach": true,
"source_volid": null,
"consistencygroup_id": null,
"os-vol-mig-status-attr:name_id": null,
"name": "test-volume-attachments",
"bootable": "false",
"created_at": "2015-11-29T03:01:44.000000",
"volume_type": "lvmdriver-1"
},
{
"migration_status": null,
"attachments": [],
"links": [
{
"href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506",
"rel": "self"
},
{
"href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506",
"rel": "bookmark"
}
],
"availability_zone": "nova",
"os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1",
"encrypted": false,
"replication_status": "disabled",
"snapshot_id": null,
"id": "173f7b48-c4c1-4e70-9acc-086b39073506",
"size": 1,
"user_id": "32779452fcd34ae1a53a797ac8a1e064",
"os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978",
"os-vol-mig-status-attr:migstat": null,
"metadata": {},
"status": "available",
"volume_image_metadata": {
"kernel_id": "8a55f5f1-78f7-4477-8168-977d8519342c",
"checksum": "eb9139e4942121f22bbc2afc0400b2a4",
"min_ram": "0",
"ramdisk_id": "5f6bdf8a-92db-4988-865b-60bdd808d9ef",
"disk_format": "ami",
"image_name": "cirros-0.3.4-x86_64-uec",
"image_id": "b48c53e1-9a96-4a5a-a630-2e74ec54ddcc",
"container_format": "ami",
"min_disk": "0",
"size": "25165824"
},
"description": "",
"multiattach": false,
"source_volid": null,
"consistencygroup_id": null,
"os-vol-mig-status-attr:name_id": null,
"name": "test-volume",
"bootable": "true",
"created_at": "2015-11-29T02:25:18.000000",
"volume_type": "lvmdriver-1"
}
],
"volumes_links": [{
"href": "https://158.69.65.111/volume/v2/4ad9f06ab8654e40befa59a2e7cac86d/volumes/detail?limit=1&marker=3b451d5d-9358-4a7e-a746-c6fd8b0e1462",
"rel": "next"
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/samples/volumes-list-response.json 0000664 0000000 0000000 00000002501 15131732575 0030376 0 ustar 00root root 0000000 0000000 {
"volumes": [
{
"id": "45baf976-c20a-4894-a7c3-c94b7376bf55",
"links": [
{
"href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55",
"rel": "self"
},
{
"href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55",
"rel": "bookmark"
}
],
"name": "vol-004"
},
{
"id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
"links": [
{
"href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635",
"rel": "self"
},
{
"href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635",
"rel": "bookmark"
}
],
"name": "vol-003"
}
],
"volumes_links": [{
"href": "https://158.69.65.111/volume/v2/4ad9f06ab8654e40befa59a2e7cac86d/volumes/detail?limit=1&marker=3b451d5d-9358-4a7e-a746-c6fd8b0e1462",
"rel": "next"
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volume-manage.inc 0000664 0000000 0000000 00000004107 15131732575 0024774 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume manage extension (os-volume-manage)
==========================================
Creates volumes by using existing storage instead of allocating new
storage.
Manage existing volume
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/os-volume-manage
Creates a Block Storage volume by using existing storage rather than allocating
new storage.
The caller must specify a reference to an existing storage volume
in the ref parameter in the request. Although each storage driver
might interpret this reference differently, the driver should
accept a reference structure that contains either a source-id
or source-name element, if possible.
The API chooses the size of the volume by rounding up the size of
the existing storage volume to the next gibibyte (GiB).
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- description: description
- availability_zone: availability_zone
- bootable: bootable
- volume_type: volume_type
- name: name
- volume: volume
- host: host
- ref: ref
- metadata: metadata
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-manage-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- volume: volume
- status: status_3
- migration_status: migration_status
- user_id: user_id
- attachments: attachments
- links: links_3
- availability_zone: availability_zone
- bootable: bootable_response
- encrypted: encrypted
- created_at: created_at
- description: description_5
- updated_at: updated_at
- volume_type: volume_type
- name: name_13
- replication_status: replication_status
- consistencygroup_id: consistencygroup_id
- source_volid: source_volid
- snapshot_id: snapshot_id
- multiattach: multiattach_resp
- metadata: metadata_1
- id: id_5
- size: size
Response Example
----------------
.. literalinclude:: ./samples/volume-manage-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volume-type-access.inc 0000664 0000000 0000000 00000004331 15131732575 0025763 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume type access (volumes)
============================
Private volume type access to project.
By default, volumes types are public. To create a private volume
type, set the ``is_public`` boolean field to ``false`` at volume
type creation time. To control access to a private volume type,
user needs to add a project to or remove a project from the volume
type. Private volume types without projects are only accessible by
users with the administrative role and context.
Add private volume type access
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/types/{volume_type}/action
Adds private volume type access to a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project: project
- project_id: project_id_path
- volume_type: volume_type_access
Request Example
---------------
.. literalinclude:: ./samples/volume-type-access-add-request.json
:language: javascript
Remove private volume type access
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/types/{volume_type}/action
Removes private volume type access from a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project: project
- project_id: project_id_path
- volume_type: volume_type_access
Request Example
---------------
.. literalinclude:: ./samples/volume-type-access-delete-request.json
:language: javascript
List private volume type access details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v2/{project_id}/types/{volume_type}/os-volume-type-access
Lists project IDs that have access to private volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type: volume_type_access
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id
Response Example
----------------
.. literalinclude:: ./samples/volume-type-access-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volumes-v2-extensions.inc 0000664 0000000 0000000 00000001322 15131732575 0026447 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
API extensions (extensions)
===========================
List API extensions
~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/extensions
Lists Block Storage API extensions.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- updated: updated_at
- description: description
- links: links
- namespace: namespace
- alias: alias
- name: name
Response Example
----------------
.. literalinclude:: ./samples/extensions-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volumes-v2-snapshots-actions.inc 0000664 0000000 0000000 00000001336 15131732575 0027735 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Snapshot actions (snapshots, action)
====================================
Administrator only. Resets status for a snapshot.
Reset snapshot's status
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/snapshots/{snapshot_id}/action
Resets the status. Specify the ``os-reset_status`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- status: status_2
- os-reset_status: os-reset_status
- project_id: project_id_path
- snapshot_id: snapshot_id
Request Example
---------------
.. literalinclude:: ./samples/snapshot-status-reset-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volumes-v2-snapshots.inc 0000664 0000000 0000000 00000020714 15131732575 0026300 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume snapshots (snapshots)
============================
A snapshot is a point-in-time copy of the data that a volume
contains.
When you create, list, or delete snapshots, these status values are
possible:
**Snapshot statuses**
+----------------+---------------------------------------------+
| Status | Description |
+----------------+---------------------------------------------+
| creating | The snapshot is being created. |
+----------------+---------------------------------------------+
| available | The snapshot is ready to use. |
+----------------+---------------------------------------------+
| backing-up | The snapshot is being backed up. |
+----------------+---------------------------------------------+
| deleting | The snapshot is being deleted. |
+----------------+---------------------------------------------+
| error | A snapshot creation error occurred. |
+----------------+---------------------------------------------+
| deleted | The snapshot has been deleted. |
+----------------+---------------------------------------------+
| unmanaging | The snapshot is being unmanaged. |
+----------------+---------------------------------------------+
| restoring | The snapshot is being restored to a volume. |
+----------------+---------------------------------------------+
| error_deleting | A snapshot deletion error occurred. |
+----------------+---------------------------------------------+
List snapshots with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/snapshots/detail
Lists all Block Storage snapshots, with details, that the project can access.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_2
- os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress
- description: description
- created_at: created_at
- name: name
- volume_id: volume_id_5
- os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id
- size: size
- id: id_4
- metadata: metadata
- updated_at: updated_at
- snapshots_links: links_5
Response Example
----------------
.. literalinclude:: ./samples/snapshots-list-detailed-response.json
:language: javascript
Create snapshot
~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/snapshots
Creates a volume snapshot, which is a point-in-time, complete copy of a volume.
You can create a volume from a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- snapshot: snapshot
- volume_id: volume_id
- force: force
- description: description
- name: name
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/snapshot-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_2
- description: description
- created_at: created_at
- name: name
- snapshot: snapshot
- volume_id: volume_id_5
- metadata: metadata
- id: id_4
- size: size
List snapshots
~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/snapshots
Lists all Block Storage snapshots, with summary information, that the project
can access.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_2
- description: description
- created_at: created_at
- name: name
- volume_id: volume_id_5
- metadata: metadata
- id: id_4
- size: size
- updated_at: updated_at
- snapshots_links: links_5
Response Example
----------------
.. literalinclude:: ./samples/snapshots-list-response.json
:language: javascript
Show snapshot metadata
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/snapshots/{snapshot_id}/metadata
Shows metadata for a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata
Response Example
----------------
.. literalinclude:: ./samples/snapshot-metadata-show-response.json
:language: javascript
Create snapshot metadata
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/snapshots/{snapshot_id}/metadata
Updates metadata for a snapshot.
Creates or replaces metadata items that match keys. Does not modify items that
are not in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- metadata: metadata
- project_id: project_id_path
- snapshot_id: snapshot_id
Request Example
---------------
.. literalinclude:: ./samples/snapshot-metadata-create-request.json
:language: javascript
Response Example
----------------
.. literalinclude:: ./samples/snapshot-metadata-create-response.json
:language: javascript
Update snapshot metadata
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/snapshots/{snapshot_id}/metadata
Replaces all the snapshot's metadata with the key-value pairs in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- metadata: metadata
- project_id: project_id_path
- snapshot_id: snapshot_id
Request Example
---------------
.. literalinclude:: ./samples/snapshot-metadata-update-request.json
:language: javascript
Response Example
----------------
.. literalinclude:: ./samples/snapshot-metadata-update-response.json
:language: javascript
Show snapshot details
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/snapshots/{snapshot_id}
Shows details for a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_2
- os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress
- description: description
- created_at: created_at
- name: name
- snapshot: snapshot
- volume_id: volume_id_5
- os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id
- size: size
- id: id_4
- metadata: metadata
- updated_at: updated_at
Response Example
----------------
.. literalinclude:: ./samples/snapshot-show-response.json
:language: javascript
Update snapshot
~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/snapshots/{snapshot_id}
Updates a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- snapshot: snapshot
- description: description
- name: name
- project_id: project_id_path
- snapshot_id: snapshot_id
Request Example
---------------
.. literalinclude:: ./samples/snapshot-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_2
- description: description
- created_at: created_at
- name: name
- snapshot: snapshot
- volume_id: volume_id_5
- metadata: metadata
- id: id_4
- size: size
Response Example
----------------
.. literalinclude:: ./samples/snapshot-update-response.json
:language: javascript
Delete snapshot
~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/snapshots/{snapshot_id}
Deletes a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volumes-v2-types.inc 0000664 0000000 0000000 00000022251 15131732575 0025420 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume types (types)
====================
Update volume type
~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/types/{volume_type_id}
Updates a volume type.
To create an environment with multiple-storage back ends, you must
specify a volume type. The API spawns Block Storage volume back
ends as children to ``cinder-volume``, and keys them from a unique
queue. The API names the back ends ``cinder-volume.HOST.BACKEND``.
For example, ``cinder-volume.ubuntu.lvmdriver``. When you create a
volume, the scheduler chooses an appropriate back end for the
volume type to handle the request.
For information about how to use volume types to create multiple-
storage back ends, see `Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type: volume_type_1
- volume_type_id: volume_type_id
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-type-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- is_public: is_public
- extra_specs: extra_specs
- description: description
- volume_type: volume_type_1
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-type-show-response.json
:language: javascript
Update extra specs for a volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/types/{volume_type_id}
Updates the extra specifications that are assigned to a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- extra_specs: extra_specs
- volume_type: volume_type_1
- volume_type_id: volume_type_id
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-type-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- is_public: is_public
- extra_specs: extra_specs
- description: description
- volume_type: volume_type_1
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-type-show-response.json
:language: javascript
Show volume type details for v2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/types/{volume_type_id}
Shows details for a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- is_public: is_public
- extra_specs: extra_specs
- description: description
- volume_type: volume_type_1
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-type-show-response.json
:language: javascript
Delete volume type
~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/types/{volume_type_id}
Deletes a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id
- project_id: project_id_path
List all volume types for v2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/types
Lists volume types.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_types: volume_types
- extra_specs: extra_specs
- name: name
- volume_type: volume_type
Response Example
----------------
.. literalinclude:: ./samples/volume-types-list-response.json
:language: javascript
Create volume type for v2
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/types
Creates a volume type.
To create an environment with multiple-storage back ends, you must
specify a volume type. Block Storage volume back ends are spawned
as children to ``cinder-volume``, and they are keyed from a unique
queue. They are named ``cinder-volume.HOST.BACKEND``. For example,
``cinder-volume.ubuntu.lvmdriver``. When a volume is created, the
scheduler chooses an appropriate back end to handle the request
based on the volume type.
For information about how to use volume types to create multiple-
storage back ends, see `Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type: volume_type_1
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-type-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- is_public: is_public
- extra_specs: extra_specs
- description: description
- volume_type: volume_type_1
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume-type-show-response.json
:language: javascript
Show an encryption type for v2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/types/{volume_type_id}/encryption
Show an encryption type.
To show an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id_body
- encryption_id: encryption_id
- encryption: encryption
- key_size: key_size
- provider: provider
- control_location: control_location
- cipher: cipher
- deleted: deleted
- created_at: created_at
- updated_at: updated_at
- deleted_at: deleted_at
Response Example
----------------
.. literalinclude:: ./samples/encryption-type-show-response.json
:language: javascript
Delete an encryption type for v2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
DELETE /v2/{project_id}/types/{volume_type_id}/encryption/{encryption_id}
Delete an encryption type.
To delete an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id
- project_id: project_id_path
- encryption_id: encryption_id
Create an encryption type for v2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/types/{volume_type_id}/encryption
Creates an encryption type.
To create an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id
- project_id: project_id_path
- encryption: encryption
- key_size: key_size
- provider: provider
- control_location: control_location
- cipher: cipher
Request Example
---------------
.. literalinclude:: ./samples/encryption-type-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id_body
- encryption_id: encryption_id
- encryption: encryption
- key_size: key_size
- provider: provider
- control_location: control_location
- cipher: cipher
Response Example
----------------
.. literalinclude:: ./samples/encryption-type-create-response.json
:language: javascript
Update an encryption type for v2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
PUT /v2/{project_id}/types/{volume_type_id}/encryption/{encryption_id}
Update an encryption type.
To update an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id
- project_id: project_id_path
- encryption_id: encryption_id
- encryption: encryption
- key_size: key_size
- provider: provider_optional
- control_location: control_location
- cipher: cipher
Request Example
---------------
.. literalinclude:: ./samples/encryption-type-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- encryption: encryption
- key_size: key_size
- provider: provider_optional
- control_location: control_location
- cipher: cipher
Response Example
----------------
.. literalinclude:: ./samples/encryption-type-update-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volumes-v2-versions.inc 0000664 0000000 0000000 00000000764 15131732575 0026131 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
API version details
===================
Show API v2 details
~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/
Shows details for Block Storage API v2.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- location: location
Response Example
----------------
.. literalinclude:: ./samples/version-show-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volumes-v2-volumes-actions.inc 0000664 0000000 0000000 00000032404 15131732575 0027405 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume actions (volumes, action)
================================
Extends the size of, resets statuses for, sets image metadata for,
and removes image metadata from a volume. Attaches a volume to a
server, detaches a volume from a server, and removes a volume from
Block Storage management without actually removing the back-end
storage object associated with it.
Extend volume size
~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Extends the size of a volume to a requested size, in gibibytes (GiB). Specify
the ``os-extend`` action in the request body.
Preconditions
- Volume status must be ``available``.
- Sufficient amount of storage must exist to extend the volume.
- The user quota must have sufficient volume storage.
Troubleshooting
- An ``error_extending`` volume status indicates that the request
failed. Ensure that you meet the preconditions and retry the
request. If the request fails again, investigate the storage back
end.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- os-extend: os-extend
- new_size: new_size
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-extend-request.json
:language: javascript
Reset volume statuses
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Administrator only. Resets the status, attach status, and migration status for
a volume. Specify the ``os-reset_status`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- status: status_3
- migration_status: migration_status
- os-reset_status: os-reset_status
- attach_status: attach_status
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-status-reset-request.json
:language: javascript
Set image metadata for volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Sets the image metadata for a volume. Specify the ``os-set_image_metadata``
action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- os-set_image_metadata: os-set_image_metadata
- metadata: metadata
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-image-metadata-set-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_8
Response Example
----------------
.. literalinclude:: ./samples/volume-image-metadata-update-response.json
:language: javascript
Remove image metadata from volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Removes image metadata, by key, from a volume. Specify the
``os-unset_image_metadata`` action in the request body and the ``key`` for the
metadata key and value pair that you want to remove.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- os-unset_image_metadata: os-unset_image_metadata
- key: key
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-image-metadata-unset-request.json
:language: javascript
Show image metadata for volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Shows image metadata for a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-show_image_metadata: os-show_image_metadata
Request Example
---------------
.. literalinclude:: ./samples/image-metadata-show-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_8
Response Example
----------------
.. literalinclude:: ./samples/image-metadata-show-response.json
:language: javascript
Attach volume to server
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Attaches a volume to a server. Specify the ``os-attach`` action in the request
body.
Preconditions
- Volume status must be ``available``.
- You should set ``instance_uuid`` or ``host_name``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- instance_uuid: instance_uuid
- mountpoint: mountpoint
- host_name: host_name
- os-attach: os-attach
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-attach-request.json
:language: javascript
Detach volume from a server
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Detaches a volume from a server. Specify the ``os-detach`` action in the
request body.
Preconditions
- Volume status must be ``in-use``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- attachment_id: attachment_id
- os-detach: os-detach
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-detach-request.json
:language: javascript
Unmanage volume
~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Removes a volume from Block Storage management without removing the back-end
storage object that is associated with it. Specify the ``os-unmanage`` action
in the request body.
Preconditions
- Volume status must be ``available``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- os-unmanage: os-unmanage
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-unmanage-request.json
:language: javascript
Force detach volume
~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Forces a volume to detach. Specify the ``os-force_detach`` action in the
request body.
Rolls back an unsuccessful detach operation after you disconnect
the volume.
Policy defaults enable only users with the administrative role to
perform this operation. Cloud providers can change these permissions
through the ``volume_extension:volume_admin_actions:force_detach`` rule in
the policy configuration file.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- connector: connector
- attachment_id: attachment_id
- os-force_detach: os-force_detach
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-force-detach-request.json
:language: javascript
Retype volume
~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Change type of existing volume. Specify the ``os-retype`` action in the request
body.
Change the volume type of existing volume, Cinder may migrate the volume to
proper volume host according to the new volume type.
Retyping an *in-use* volume from a multiattach-capable type to a
non-multiattach-capable type, or vice-versa, is not supported. It is generally
not recommended to retype an *in-use* multiattach volume if that volume has
more than one active read/write attachment.
Policy defaults enable only users with the administrative role or the owner of
the volume to perform this operation. Cloud providers can change these
permissions through the policy configuration file.
Retyping an unencrypted volume to the same size encrypted volume will most
likely fail. Even though the volume is the same size as the source volume, the
encrypted volume needs to store additional encryption information overhead.
This results in the new volume not being large enough to hold all data.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- new_type: new_type
- migration_policy: migration_policy
- os-retype: os-retype
- volume_id: volume_id_path
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-os-retype-request.json
:language: javascript
Migrate volume
~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Specify the ``os-migrate_volume`` action in the request body.
Migrates a volume to the specified host.
It is generally not recommended to migrate an *in-use* multiattach volume if
that volume has more than one active read/write attachment.
Policy defaults enable only users with the administrative role to perform this
operation. Cloud providers can change these permissions through the
policy configuration file.
**Preconditions**
* The volume ``status`` must be ``available`` or ``in-use``.
* The volume ``migration_status`` must be ``None``, ``deleting``, ``error``,
or ``success``.
* The volume ``replication_status`` must be ``None``, ``disabled`` or
``not-capable``.
* The migration must happen to another host from which the
volume currently resides.
* The volume must not be a member of a group.
* The volume must not have snapshots.
**Asynchronous Postconditions**
On success, the volume ``status`` will return to its original status of
``available`` or ``in-use`` and the ``migration_status`` will be ``success``.
On failure, the ``migration_status`` will be ``error``. In the case of failure,
if ``lock_volume`` was true and the volume was originally ``available`` when
it was migrated, the ``status`` will go back to ``available``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id_path
- project_id: project_id_path
- os-migrate_volume: os-migrate_volume
- host: migrate_host
- force_host_copy: migrate_force_host_copy
- lock_volume: migrate_lock_volume
Request Example
---------------
.. literalinclude:: ./samples/volume-os-migrate_volume-request.json
:language: javascript
Complete migration of a volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Specify the ``os-migrate_volume_completion`` action in the request body.
Complete the migration of a volume, updating the new volume in the DB,
returning the ``status`` of the new volume to that of the original volume
and finally deleting the original volume.
**Preconditions**
* Both the original and new volume ``migration_status`` must be ``None`` or
  both must be set to a non-``None`` value.
* Additionally, when set, the new volume ``migration_status`` must take the
  form of ``target:VOLUME_UUID`` where VOLUME_UUID is the original volume UUID.
**Asynchronous Postconditions**
On success, the volume ``status`` will return to its original status of
``available`` or ``in-use`` and the ``migration_status`` will be ``success``.
On failure, the ``migration_status`` will be ``error``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id_path
- project_id: project_id_path
- os-migrate_volume_completion: os-migrate_volume_completion
- new_volume: new_volume
- error: migration_completion_error
Request Example
---------------
.. literalinclude:: ./samples/volume-os-migrate_volume_completion-request.json
:language: javascript
Force delete volume
~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Attempts to force-delete a volume, regardless of its state. Specify the
``os-force_delete`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- os-force_delete: os-force_delete
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-force-delete-request.json
:language: javascript
Update volume bootable status
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action
Update the bootable status for a volume, marking it as a bootable volume.
Specify the ``os-set_bootable`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-set_bootable: os-set_bootable
- bootable: bootable
Request Example
---------------
.. literalinclude:: ./samples/volume-bootable-status-update-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v2/volumes-v2-volumes.inc 0000664 0000000 0000000 00000040125 15131732575 0025746 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volumes (volumes)
=================
A volume is a detachable block storage device similar to a USB hard
drive. You can attach a volume to one instance at a time.
The ``snapshot_id`` and ``source_volid`` parameters specify the ID
of the snapshot or volume from which this volume originates. If the
volume was not created from a snapshot or source volume, these
values are null.
When you create, list, update, or delete volumes, the possible
status values are:
**Volume statuses**
+------------------+--------------------------------------------------------+
| Status | Description |
+------------------+--------------------------------------------------------+
| creating | The volume is being created. |
+------------------+--------------------------------------------------------+
| available | The volume is ready to attach to an instance. |
+------------------+--------------------------------------------------------+
| attaching | The volume is attaching to an instance. |
+------------------+--------------------------------------------------------+
| detaching | The volume is detaching from an instance. |
+------------------+--------------------------------------------------------+
| in-use | The volume is attached to an instance. |
+------------------+--------------------------------------------------------+
| maintenance | The volume is locked and being migrated. |
+------------------+--------------------------------------------------------+
| deleting | The volume is being deleted. |
+------------------+--------------------------------------------------------+
| awaiting-transfer| The volume is awaiting transfer.                       |
+------------------+--------------------------------------------------------+
| error | A volume creation error occurred. |
+------------------+--------------------------------------------------------+
| error_deleting | A volume deletion error occurred. |
+------------------+--------------------------------------------------------+
| backing-up | The volume is being backed up. |
+------------------+--------------------------------------------------------+
| restoring-backup | A backup is being restored to the volume. |
+------------------+--------------------------------------------------------+
| error_backing-up | A backup error occurred. |
+------------------+--------------------------------------------------------+
| error_restoring | A backup restoration error occurred. |
+------------------+--------------------------------------------------------+
| error_extending | An error occurred while attempting to extend a volume. |
+------------------+--------------------------------------------------------+
| downloading | The volume is downloading an image. |
+------------------+--------------------------------------------------------+
| uploading | The volume is being uploaded to an image. |
+------------------+--------------------------------------------------------+
| retyping | The volume is changing type to another volume type. |
+------------------+--------------------------------------------------------+
| extending | The volume is being extended. |
+------------------+--------------------------------------------------------+
List volumes with details
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/volumes/detail
Lists all Block Storage volumes, with details, that the project can access.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links
- availability_zone: availability_zone
- os-vol-host-attr:host: os-vol-host-attr:host
- encrypted: encrypted
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id
- size: size
- user_id: user_id
- os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id
- os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat
- metadata: metadata
- status: status_3
- volume_image_metadata: volume_image_metadata
- description: description
- multiattach: multiattach_resp
- source_volid: source_volid
- consistencygroup_id: consistencygroup_id
- os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id
- name: name
- bootable: bootable_response
- created_at: created_at
- volumes: volumes
- volume_type: volume_type
- volumes_links: links_vol_optional
Response Example
----------------
.. literalinclude:: ./samples/volumes-list-detailed-response.json
:language: javascript
Create volume
~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes
Creates a volume.
To create a bootable volume, include the UUID of the image from
which you want to create the volume in the ``imageRef`` attribute
in the request body.
Preconditions
- You must have enough volume storage quota remaining to create a
volume of size requested.
Asynchronous Postconditions
- With correct permissions, you can see the volume status as
``available`` through API calls.
- With correct access, you can see the created volume in the storage
system that OpenStack Block Storage manages.
Troubleshooting
- If volume status remains ``creating`` or shows another error
status, the request failed. Ensure you meet the preconditions
then investigate the storage back end.
- Volume is not created in the storage system that OpenStack Block
Storage manages.
- The storage node needs enough free storage space to match the size
of the volume creation request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- size: size
- description: description_9
- imageRef: imageRef
- availability_zone: availability_zone
- source_volid: source_volid
- name: volume_name_optional
- volume: volume
- consistencygroup_id: consistencygroup_id_1
- volume_type: volume_type_2
- snapshot_id: snapshot_id
- OS-SCH-HNT:scheduler_hints: OS-SCH-HNT:scheduler_hints
- metadata: metadata_2
- project_id: project_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links
- availability_zone: availability_zone
- encrypted: encrypted
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id
- size: size
- user_id: user_id
- metadata: metadata
- status: status_3
- description: description
- multiattach: multiattach_resp
- source_volid: source_volid
- volume: volume
- consistencygroup_id: consistencygroup_id
- name: name
- bootable: bootable_response
- created_at: created_at
- volume_type: volume_type
Response Example
----------------
.. literalinclude:: ./samples/volume-create-response.json
:language: javascript
List volumes
~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/volumes
Lists summary information for all Block Storage volumes that the project can
access.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volumes: volumes
- id: id
- links: links
- name: name
- volumes_links: links_vol_optional
Response Example
----------------
.. literalinclude:: ./samples/volumes-list-response.json
:language: javascript
Show volume details
~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/volumes/{volume_id}
Shows details for a volume.
Preconditions
- The volume must exist.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links
- availability_zone: availability_zone
- os-vol-host-attr:host: os-vol-host-attr:host
- encrypted: encrypted
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id
- size: size
- user_id: user_id
- os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id
- os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat
- metadata: metadata
- status: status_3
- volume_image_metadata: volume_image_metadata
- description: description
- multiattach: multiattach_resp
- source_volid: source_volid
- volume: volume
- consistencygroup_id: consistencygroup_id
- os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id
- name: name
- bootable: bootable_response
- created_at: created_at
- volume_type: volume_type
Response Example
----------------
.. literalinclude:: ./samples/volume-show-response.json
:language: javascript
Update volume
~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/volumes/{volume_id}
Updates a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- volume: volume
- description: description
- name: name
- metadata: metadata_2
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links
- availability_zone: availability_zone
- encrypted: encrypted
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id
- size: size
- user_id: user_id
- metadata: metadata_3
- status: status_3
- description: description
- multiattach: multiattach_resp
- source_volid: source_volid
- volume: volume
- consistencygroup_id: consistencygroup_id
- name: name
- bootable: bootable_response
- created_at: created_at
- volume_type: volume_type
Response Example
----------------
.. literalinclude:: ./samples/volume-update-response.json
:language: javascript
Delete volume
~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/volumes/{volume_id}
Deletes a volume.
Preconditions
- Volume status must be ``available``, ``in-use``, ``error``, or
``error_restoring``.
- You cannot already have a snapshot of the volume.
- You cannot delete a volume that is in a migration.
Asynchronous Postconditions
- The volume is deleted in volume index.
- The volume managed by OpenStack Block Storage is deleted in
storage node.
Troubleshooting
- If volume status remains in ``deleting`` or becomes
``error_deleting`` the request failed. Ensure you meet the
preconditions then investigate the storage back end.
- The volume managed by OpenStack Block Storage is not deleted from
the storage system.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- cascade: cascade
Create volume metadata
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/metadata
Creates or replaces metadata for a volume. Does not modify items that are not
in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- metadata: metadata_3
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-metadata-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_3
Response Example
----------------
.. literalinclude:: ./samples/volume-metadata-create-response.json
:language: javascript
Show volume metadata
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/volumes/{volume_id}/metadata
Shows metadata for a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_3
Response Example
----------------
.. literalinclude:: ./samples/volume-metadata-show-response.json
:language: javascript
Update volume metadata
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/volumes/{volume_id}/metadata
Replaces all the volume's metadata with the key-value pairs in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- metadata: metadata_3
- project_id: project_id_path
- volume_id: volume_id_path
Request Example
---------------
.. literalinclude:: ./samples/volume-metadata-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_3
Response Example
----------------
.. literalinclude:: ./samples/volume-metadata-update-response.json
:language: javascript
Show volume metadata for a specific key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v2/{project_id}/volumes/{volume_id}/metadata/{key}
Shows metadata for a volume for a specific key.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- key: key_2
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- meta: meta
Response Example
----------------
.. literalinclude:: ./samples/volume-metadata-show-key-response.json
:language: javascript
Delete volume metadata
~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v2/{project_id}/volumes/{volume_id}/metadata/{key}
Deletes metadata for a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- key: key_1
Update volume metadata for a specific key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v2/{project_id}/volumes/{volume_id}/metadata/{key}
Update metadata for a volume for a specific key.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- key: key_3
- meta: meta
Request Example
---------------
.. literalinclude:: ./samples/volume-metadata-update-key-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- meta: meta
Response Example
----------------
.. literalinclude:: ./samples/volume-metadata-update-key-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/ 0000775 0000000 0000000 00000000000 15131732575 0021543 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/api-versions.inc 0000664 0000000 0000000 00000001034 15131732575 0024653 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
API versions
============
List All API Versions
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /
Lists information for all Block Storage API versions.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 300
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 405
- 404
- 409
- 500
- 503
Request
-------
Response
--------
**Example List API Versions: JSON response**
.. literalinclude:: ./samples/versions/versions-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/attachments.inc 0000664 0000000 0000000 00000021430 15131732575 0024551 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Attachments (attachments)
=========================
Lists all, lists all with details, shows details for, creates, and
deletes attachments.
.. note:: Everything except for `Complete attachment` is new as of the 3.27
microversion. `Complete attachment` is new as of the 3.44
microversion.
When you create, list, update, or delete an attachment, the possible
status values are:
**VolumeAttachment statuses**
+------------------+--------------------------------------------------------+
| Status | Description |
+------------------+--------------------------------------------------------+
| attached | A volume is attached for the attachment. |
+------------------+--------------------------------------------------------+
| attaching | A volume is attaching for the attachment. |
+------------------+--------------------------------------------------------+
| detached | A volume is detached for the attachment. |
+------------------+--------------------------------------------------------+
| reserved | A volume is reserved for the attachment. |
+------------------+--------------------------------------------------------+
| error_attaching | A volume is error attaching for the attachment. |
+------------------+--------------------------------------------------------+
| error_detaching | A volume is error detaching for the attachment. |
+------------------+--------------------------------------------------------+
| deleted | The attachment is deleted. |
+------------------+--------------------------------------------------------+
Delete attachment
~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/attachments/{attachment_id}
Deletes an attachment.
For security reasons (see bug `#2004555
<https://bugs.launchpad.net/cinder/+bug/2004555>`_) the Block Storage API rejects
REST API calls manually made from users with a 409 status code if there is a
Nova instance currently using the attachment, which happens when all the
following conditions are met:
- Attachment has an instance uuid
- VM exists in Nova
- Instance has the volume attached
- Attached volume in instance is using the attachment
Calls coming from other OpenStack services (like the Compute Service) are
always accepted.
Available starting in the 3.27 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
- 409
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- attachment_id: attachment_id_path
Show attachment details
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/attachments/{attachment_id}
Shows details for an attachment.
Available starting in the 3.27 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- attachment_id: attachment_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_attachment
- detached_at: detached_at
- connection_info: connection_info
- attached_at: attached_at
- attach_mode: attach_mode_required
- instance: instance_uuid_req
- volume_id: volume_id_attachment
- id: attachment_id_required
Response Example
----------------
.. literalinclude:: ./samples/attachment-show-response.json
:language: javascript
List attachments with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/attachments/detail
Lists all attachments with details. Since v3.31, if non-admin
users specify invalid filters in the URL, the API will return a bad request.
Available starting in the 3.27 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_attachment
- detached_at: detached_at
- connection_info: connection_info
- attached_at: attached_at
- attach_mode: attach_mode_required
- instance: instance_uuid_req
- volume_id: volume_id_attachment
- id: attachment_id_required
Response Example
----------------
.. literalinclude:: ./samples/attachment-list-detailed-response.json
:language: javascript
List attachments
~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/attachments
Lists all attachments. Since v3.31, if non-admin users
specify invalid filters in the URL, the API will return a bad request.
Available starting in the 3.27 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- instance_id: instance_uuid_query_optional
- volume_id: volume_id_attachment_query_optional
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_attachment
- instance: instance_uuid_req
- volume_id: volume_id_attachment
- id: attachment_id_required
Response Example
----------------
.. literalinclude:: ./samples/attachment-list-response.json
:language: javascript
Create attachment
~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/attachments
Creates an attachment.
Available starting in the 3.27 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- attachment: attachment
- instance_uuid: instance_uuid
- connector: connector
- volume_uuid: volume_id_attachment
- mode: attach_mode
Request Example
---------------
.. literalinclude:: ./samples/attachment-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- attachment: attachment
- status: status_attachment
- detached_at: detached_at
- connection_info: connection_info
- attached_at: attached_at
- attach_mode: attach_mode_required
- instance: instance_uuid_req
- volume_id: volume_id_attachment
- id: attachment_id_required
Response Example
----------------
.. literalinclude:: ./samples/attachment-create-response.json
:language: javascript
Update an attachment
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/attachments/{attachment_id}
Update a reserved attachment record with connector information
and set up the appropriate connection_info from the driver.
Available starting in the 3.27 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
- 409
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- attachment_id: attachment_id_path
- attachment: attachment
- connector: connector_required
Request Example
---------------
.. literalinclude:: ./samples/attachment-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- attachment: attachment
- status: status_attachment
- detached_at: detached_at
- connection_info: connection_info
- attached_at: attached_at
- attach_mode: attach_mode_required
- instance: instance_uuid_req
- volume_id: volume_id_attachment
- id: attachment_id_required
Response Example
----------------
.. literalinclude:: ./samples/attachment-update-response.json
:language: javascript
Complete attachment
~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/attachments/{attachment_id}/action
Complete an attachment for a cinder volume.
Available starting in the 3.44 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 204
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- attachment_id: attachment_id_path
Request Example
---------------
.. literalinclude:: ./samples/attachment-complete.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/availability-zones-v3.inc 0000664 0000000 0000000 00000001545 15131732575 0026377 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Availability zones (os-availability-zone)
=========================================
List availability zone information.
List Availability Zone Information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/os-availability-zone
List availability zone information.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameter
------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id
- availabilityZoneInfo: availability_zone_info
- zoneName: availability_zone_required
- zoneState: availability_zone_state
- available: available
Response Example
----------------
.. literalinclude:: ./samples/availability-zone-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/capabilities-v3.inc 0000664 0000000 0000000 00000002257 15131732575 0025223 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Capabilities for storage back ends (capabilities)
=================================================
Shows capabilities for a storage back end.
Show all back-end capabilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/capabilities/{hostname}
Shows capabilities for a storage back end on the host.
The ``hostname`` takes the form of ``hostname@volume_backend_name``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- hostname: hostname
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- pool_name: pool_name
- description: description_cap
- volume_backend_name: volume_backend_name
- namespace: namespace_storage
- visibility: visibility
- driver_version: driver_version
- vendor_name: vendor_name
- properties: properties
- storage_protocol: storage_protocol
- replication_targets: replication_targets
- display_name: display_name
Response Example
----------------
.. literalinclude:: ./samples/backend-capabilities-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/clusters.inc 0000664 0000000 0000000 00000016217 15131732575 0024111 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Clusters (clusters)
===================
Administrator only. Lists all Cinder clusters, show cluster detail,
enable or disable a cluster.
Each cinder service runs on a *host* computer (possibly multiple services
on the same host; it depends how you decide to deploy cinder). In order
to support High Availability scenarios, services can be grouped into
*clusters* where the same type of service (for example, cinder-volume)
can run on different hosts so that if one host goes down the service is
still available on a different host. Since there's no point having these
services sitting around doing nothing while waiting for some other host
to go down (which is also known as Active/Passive mode), grouping services
into clusters also allows cinder to support Active/Active mode in which
all services in a cluster are doing work all the time.
.. note::
Currently the only service that can be grouped into clusters is
``cinder-volume``.
Clusters are determined by the deployment configuration; that's why there
is no 'create-cluster' API call listed below. Once your services are up
and running, however, you can use the following API requests to get
information about your clusters and to update their status.
Disable cluster
~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/clusters/disable
Disables a cluster. Specify the cluster by its name and optionally the
binary name in the request body.
Available starting in the 3.7 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- name: cluster_name_required
- binary: cluster_binary
- disabled_reason: disabled_reason_body
Request Example
---------------
.. literalinclude:: ./samples/clusters/v3.7/cluster-disable-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- cluster: cluster
- name: cluster_name_resp
- binary: cluster_binary_resp
- state: cluster_state
- status: cluster_status
- replication_status: cluster_replication_status
- disabled_reason: disabled_reason_body
Response Example
----------------
.. literalinclude:: ./samples/clusters/v3.7/cluster-disable-response.json
:language: javascript
Enable cluster
~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/clusters/enable
Enables a cluster. Specify the cluster by its name and optionally the
binary name in the request body.
Available starting in the 3.7 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- name: cluster_name_required
- binary: cluster_binary
Request Example
---------------
.. literalinclude:: ./samples/clusters/v3.7/cluster-enable-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- cluster: cluster
- name: cluster_name_resp
- binary: cluster_binary_resp
- state: cluster_state
- status: cluster_status
- replication_status: cluster_replication_status
- disabled_reason: disabled_reason_body
Response Example
----------------
.. literalinclude:: ./samples/clusters/v3.7/cluster-enable-response.json
:language: javascript
Show cluster details
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/clusters/{cluster_name}
Shows details for a cluster by its name and optionally the
binary name.
Available starting in the 3.7 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- cluster_name: cluster_name_path
- binary: cluster_binary_query
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- cluster: cluster
- name: cluster_name_resp
- binary: cluster_binary_resp
- state: cluster_state
- status: cluster_status
- num_hosts: cluster_num_hosts
- num_down_hosts: cluster_num_down_hosts
- last_heartbeat: cluster_last_heartbeat
- created_at: created_at
- updated_at: updated_at
- disabled_reason: disabled_reason_body
- replication_status: cluster_replication_status
- frozen: cluster_frozen
- active_backend_id: cluster_active_backend_id
Response Example
----------------
.. literalinclude:: ./samples/clusters/v3.7/cluster-show-response.json
:language: javascript
List clusters
~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/clusters
Lists all clusters.
Available starting in the 3.7 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- name: cluster_name_query
- binary: cluster_binary_query
- is_up: cluster_is_up_query
- disabled: cluster_disabled_query
- num_hosts: cluster_num_hosts_query
- num_down_hosts: cluster_num_down_hosts_query
- replication_status: cluster_replication_status_query
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- clusters: clusters
- name: cluster_name_resp
- binary: cluster_binary_resp
- state: cluster_state
- status: cluster_status
- replication_status: cluster_replication_status
Response Example
----------------
.. literalinclude:: ./samples/clusters/v3.7/clusters-list-response.json
:language: javascript
List clusters with details
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/clusters/detail
Lists all clusters with details.
Available starting in the 3.7 microversion.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- name: cluster_name_query
- binary: cluster_binary_query
- is_up: cluster_is_up_query
- disabled: cluster_disabled_query
- num_hosts: cluster_num_hosts_query
- num_down_hosts: cluster_num_down_hosts_query
- replication_status: cluster_replication_status_query
- frozen: cluster_frozen
- active_backend_id: cluster_active_backend_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- clusters: clusters
- name: cluster_name_resp
- binary: cluster_binary_resp
- state: cluster_state
- status: cluster_status
- num_hosts: cluster_num_hosts
- num_down_hosts: cluster_num_down_hosts
- last_heartbeat: cluster_last_heartbeat
- created_at: created_at
- updated_at: updated_at
- disabled_reason: disabled_reason_body
- replication_status: cluster_replication_status
- frozen: cluster_frozen
- active_backend_id: cluster_active_backend_id
Response Example
----------------
.. literalinclude:: ./samples/clusters/v3.7/clusters-list-detailed-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/consistencygroups-v3.inc 0000664 0000000 0000000 00000014052 15131732575 0026367 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Consistency groups (DEPRECATED)
===============================
Consistency groups enable you to create snapshots at the exact same
point in time from multiple volumes. For example, a database might
place its tables, logs, and configuration on separate volumes. To
restore this database from a previous point in time, it makes sense
to restore the logs, tables, and configuration together from the
exact same point in time.
Use the policy configuration file to grant permissions for these actions
to specific roles.
List project's consistency groups
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/consistencygroups
Lists consistency groups.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- consistencygroups: consistencygroups
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/consistency-groups-list-response.json
:language: javascript
Create a consistency group
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/consistencygroups
Creates a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- consistencygroup: consistencygroup
- description: description_consis
- availability_zone: availability_zone
- volume_types: volume_types_commas
- name: name_consis
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-create-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- consistencygroup: consistencygroup
- status: status_consis
- description: description_cg
- availability_zone: availability_zone
- created_at: created_at
- volume_types: volume_types
- name: name_consis
- id: consistencygroup_id
Response Example
----------------
.. literalinclude:: ./samples/consistency-group-create-response.json
:language: javascript
Show a consistency group's details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/consistencygroups/{consistencygroup_id}
Shows details for a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- consistencygroup_id: consistencygroup_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_consis
- description: description_cg
- availability_zone: availability_zone
- created_at: created_at
- volume_types: volume_types
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/consistency-group-show-response.json
:language: javascript
Create a consistency group from source
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/consistencygroups/create_from_src
Creates a consistency group from source.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- consistencygroup-from-src: consistencygroup-from-src
- status: status_consis
- user_id: user_id
- description: description_consis
- cgsnapshot_id: cgsnapshot_id
- source_cgid: source_cgid
- project_id: project_id
- name: name
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-create-from-src-request.json
:language: javascript
Delete a consistency group
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
POST /v3/{project_id}/consistencygroups/{consistencygroup_id}/delete
Deletes a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- consistencygroup_id: consistencygroup_id_path
- consistencygroup: consistencygroup
- force: force
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-delete-request.json
:language: javascript
List consistency groups and details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/consistencygroups/detail
Lists consistency groups with details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- consistencygroups: consistencygroups
- status: status_consis
- description: description_cg
- availability_zone: availability_zone
- created_at: created_at
- volume_types: volume_types
- id: id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/consistency-groups-list-detailed-response.json
:language: javascript
Update a consistency group
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
PUT /v3/{project_id}/consistencygroups/{consistencygroup_id}/update
Updates a consistency group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- consistencygroup_id: consistencygroup_id_path
- consistencygroup: consistencygroup
- remove_volumes: remove_volumes
- description: description_consis
- add_volumes: add_volumes
- name: name
Request Example
---------------
.. literalinclude:: ./samples/consistency-group-update-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/default-types.inc 0000664 0000000 0000000 00000006433 15131732575 0025032 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Default Volume Types (default-types)
====================================
Manage a default volume type for individual projects.
By default, a volume-create request that does not specify a volume-type
will assign the configured system default volume type to the volume.
You can override this behavior on a per-project basis by setting a different
default volume type for any project.
Available in microversion 3.62 or higher.
NOTE: The default policy for the list API is system admin, so you require
a system-scoped token to access it.
To get a system-scoped token, run the following command:
openstack --os-system-scope all --os-project-name='' token issue
Create or update a default volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/default-types/{project-id}
Create or update the default volume type for a project
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request Parameters
------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type: volume_type_name_or_id
Request Example
---------------
.. literalinclude:: ./samples/set-default-type-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id
- volume_type_id: volume_type_id
Response Example
----------------
.. literalinclude:: ./samples/set-default-type-response.json
:language: javascript
Show a default volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/default-types/{project-id}
Show the default volume type for a project
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 404
Request Parameters
------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id
- volume_type_id: volume_type_id
Response Example
----------------
.. literalinclude:: ./samples/get-default-type-response.json
:language: javascript
List default volume types
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/default-types/
Get a list of all default volume types
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 404
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id
- volume_type_id: volume_type_id
Response Example
----------------
.. literalinclude:: ./samples/get-default-types-response.json
:language: javascript
Delete a default volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/default-types/{project-id}
Unset the default volume type for a project.
This operation does not do anything to the volume type itself.
It simply removes the volume type from being the default volume type for
the specified project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 204
.. rest_status_code:: error ../status.yaml
- 404
Request Parameters
------------------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/ext-backups-actions-v3.inc 0000664 0000000 0000000 00000003067 15131732575 0026456 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Backup actions (backups, action)
================================
Force-deletes a backup and reset status for a backup.
Force-delete a backup
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/backups/{backup_id}/action
Force-deletes a backup. Specify the ``os-force_delete`` action in the request
body.
This operation deletes the backup and any backup data.
The backup driver returns the ``405`` status code if it does not
support this operation.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 404
- 405
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id_required
- os-force_delete: os-force_delete
Request Example
---------------
.. literalinclude:: ./samples/backup-force-delete-request.json
:language: javascript
Reset a backup's status
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/backups/{backup_id}/action
Reset a backup's status. Specify the ``os-reset_status`` action in the request
body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id_required
- os-reset_status: os-reset_status
- status: status_backup_action
Request Example
---------------
.. literalinclude:: ./samples/backup-reset-status-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/ext-backups.inc 0000664 0000000 0000000 00000026006 15131732575 0024470 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Backups (backups)
=================
A backup is a full copy of a volume stored in an external service.
The service can be configured. The only supported service is Object
Storage. A backup can subsequently be restored from the external
service to either the same volume that the backup was originally
taken from or to a new volume.
When you create, list, or delete backups, these status values are
possible:
**Backup statuses**
+-----------------+---------------------------------------------+
| Status | Description |
+-----------------+---------------------------------------------+
| creating | The backup is being created. |
+-----------------+---------------------------------------------+
| available | The backup is ready to restore to a volume. |
+-----------------+---------------------------------------------+
| deleting | The backup is being deleted. |
+-----------------+---------------------------------------------+
| error | A backup error occurred. |
+-----------------+---------------------------------------------+
| restoring | The backup is being restored to a volume. |
+-----------------+---------------------------------------------+
| error_deleting | An error occurred while deleting the backup.|
+-----------------+---------------------------------------------+
If an error occurs, you can find more information about the error
in the ``fail_reason`` field for the backup.
List backups with detail
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/backups/detail
Lists Block Storage backups, with details, to which the project has
access. Since v3.31, if non-admin users specify invalid filters in the
URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
- with_count: with_count
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backups: backups
- status: status_backup
- object_count: object_count
- fail_reason: fail_reason
- description: description
- links: links_backup
- availability_zone: availability_zone
- created_at: created_at
- updated_at: updated_at
- name: name_backup
- has_dependent_backups: has_dependent_backups
- volume_id: volume_id
- container: container
- size: size
- id: id_backup
- is_incremental: is_incremental
- data_timestamp: data_timestamp
- snapshot_id: snapshot_id_source_vol
- os-backup-project-attr:project_id: os-backup-project-attr:project_id
- count: count
- metadata: metadata_backup
- user_id: user_id_backup
- encryption_key_id: encryption_key_id
- backup_links: backup_links_optional
Response Example
----------------
.. literalinclude:: ./samples/backups/backups-list-detailed-response.json
:language: javascript
Show backup detail
~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/backups/{backup_id}
Shows details for a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id_required
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup: backup
- status: status_backup
- object_count: object_count
- container: container
- description: description
- links: links_backup
- availability_zone: availability_zone
- created_at: created_at
- updated_at: updated_at
- name: name_backup
- has_dependent_backups: has_dependent_backups
- volume_id: volume_id
- fail_reason: fail_reason
- size: size
- backup: backup
- id: id_backup
- is_incremental: is_incremental
- data_timestamp: data_timestamp
- snapshot_id: snapshot_id_source_vol
- os-backup-project-attr:project_id: os-backup-project-attr:project_id
- metadata: metadata_backup
- user_id: user_id_backup
- encryption_key_id: encryption_key_id
Response Example
----------------
.. literalinclude:: ./samples/backups/backup-show-response.json
:language: javascript
Delete a backup
~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/backups/{backup_id}
Deletes a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id_required
Restore a backup
~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/backups/{backup_id}/restore
Restores a Block Storage backup to an existing or new Block Storage volume.
The name parameter will work only if a new volume is created.
If UUID is specified, the backup will be restored to the specified volume.
The specified volume must meet the following requirements:
* the status of the specified volume is ``available``.
* the size of the specified volume is equal to or greater than the size
  of the backup.
If no existing volume UUID is provided, the backup will be restored to a
new volume matching the size and name of the originally backed up volume.
In this case, if the name parameter is provided, it will be used as the
name of the new volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 413
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id_required
- restore: restore
- name: volume_name_optional
- volume_id: volume_id_restore
Request Example
---------------
.. literalinclude:: ./samples/backup-restore-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- restore: restore
- backup_id: backup_id_required
- volume_id: volume_id
- volume_name: volume_name
Response Example
----------------
.. literalinclude:: ./samples/backup-restore-response.json
:language: javascript
Create a backup
~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/backups
Creates a Block Storage backup from a volume or snapshot.
The status of the volume must be ``available`` or if the ``force`` flag is
used, backups of ``in-use`` volumes may also be created.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup: backup
- volume_id: volume_id_backup
- container: container
- description: description
- incremental: incremental
- force: force
- name: name_optional
- snapshot_id: snapshot_id_backup
- metadata: metadata_backup
- availability_zone: availability_zone_backup
Request Example
---------------
.. literalinclude:: ./samples/backups/backup-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup: backup
- id: id_backup
- links: links_backup
- name: name_backup
- metadata: metadata_backup
Response Example
----------------
.. literalinclude:: ./samples/backups/backup-create-response.json
:language: javascript
Update a backup
~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/backups/{backup_id}
Update a Block Storage backup. This API is available since v3.9.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id_required
- backup: backup
- description: description
- name: name_optional
- metadata: metadata_backup
Request Example
---------------
.. literalinclude:: ./samples/backups/v3.9/backup-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup: backup
- id: id_backup
- links: links_backup
- name: name_backup
- metadata: metadata_backup
Response Example
----------------
.. literalinclude:: ./samples/backups/v3.9/backup-update-response.json
:language: javascript
List backups for project
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/backups
Lists Block Storage backups to which the project has access. Since
v3.31, if non-admin users specify invalid filters in the URL, the API
will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
- with_count: with_count
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backups: backups
- id: id_backup
- links: links_backup
- name: name_backup
- count: count
- backup_links: backup_links_optional
Response Example
----------------
.. literalinclude:: ./samples/backups/backups-list-response.json
:language: javascript
Export a backup
~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/backups/{backup_id}/export_record
Export information about a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup_id: backup_id_required
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backup-record: backup_record
- backup_service: backup_service
- backup_url: backup_url
Response Example
----------------
.. literalinclude:: ./samples/backup-record-export-response.json
:language: javascript
Import a backup
~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/backups/import_record
Import information about a backup.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 201
.. rest_status_code:: error ../status.yaml
- 400
- 503
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- backup-record: backup_record
- backup_service: backup_service
- backup_url: backup_url
Request Example
---------------
.. literalinclude:: ./samples/backup-record-import-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- id: id_backup
- links: links_backup
- name: name_backup
Response Example
----------------
.. literalinclude:: ./samples/backup-record-import-response.json
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/group-replication.inc 0000664 0000000 0000000 00000005256 15131732575 0025711 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Group replication (groups, action)
==================================
Lists targets, enables, disables, and fails over group replication.
Available since API microversion 3.38.
List replication targets
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action
Lists replication targets for a group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
Request Example
---------------
.. literalinclude:: ./samples/group-replication-list-targets.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- backend_id: backend_id_target
- unique_key: replication_targets_unique_key
Response Example
----------------
.. literalinclude:: ./samples/group-replication-target.json
:language: javascript
Enable group replication
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action
Enable replication for a group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
Request Example
---------------
.. literalinclude:: ./samples/group-replication-enable.json
:language: javascript
Disable group replication
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action
Disable replication for a group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
Request Example
---------------
.. literalinclude:: ./samples/group-replication-disable.json
:language: javascript
Failover replication
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action
Failover a replicated group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
- allow_attached_volume: allow_attached_volume
- secondary_backend_id: backend_id_target
Request Example
---------------
.. literalinclude:: ./samples/group-replication-failover.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/group-snapshots.inc 0000664 0000000 0000000 00000012661 15131732575 0025420 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Group snapshots (group_snapshots)
=================================
Lists all, lists all with details, shows details for, creates, and
deletes group snapshots.
Delete group snapshot
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/group_snapshots/{group_snapshot_id}
Deletes a group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_snapshot_id: group_snapshot_id_path
Show group snapshot details
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/group_snapshots/{group_snapshot_id}
Shows details for a group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_snapshot_id: group_snapshot_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_snapshot: group_snapshot
- created_at: created_at
- group_id: source_group_id_req
- id: group_snapshot_id_req
- name: name_group_snap_req
- status: status_group_snap
- description: description_group_snap_req
- group_type_id: group_type_id
- project_id: project_id_group_snapshot
Response Example
----------------
.. literalinclude:: ./samples/group-snapshots-show-response.json
:language: javascript
List group snapshots with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/group_snapshots/detail
Lists all group snapshots with details. Since v3.31, if non-admin users
specify invalid filters in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort_group_snapshot
- sort_key: sort_key_group_snapshot
- sort_dir: sort_dir_group_snapshot
- limit: limit_group_snapshot
- offset: offset_group_snapshot
- marker: marker_group_snapshot
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_snapshots: group_snapshots
- id: group_snapshot_id_req
- name: name_group_snap_req
- status: status_group_snap
- description: description_group_snap_req
- created_at: created_at
- group_id: group_id
- group_type_id: group_type_id
- project_id: project_id_group_snapshot
Response Example
----------------
.. literalinclude:: ./samples/group-snapshots-list-detailed-response.json
:language: javascript
List group snapshots
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/group_snapshots
Lists all group snapshots. Since v3.31, if non-admin users specify
invalid filters in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort_group_snapshot
- sort_key: sort_key_group_snapshot
- sort_dir: sort_dir_group_snapshot
- limit: limit_group_snapshot
- offset: offset_group_snapshot
- marker: marker_group_snapshot
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_snapshots: group_snapshots
- id: group_snapshot_id_req
- name: name_group_snap_req
Response Example
----------------
.. literalinclude:: ./samples/group-snapshots-list-response.json
:language: javascript
Create group snapshot
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/group_snapshots
Creates a group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_snapshot: group_snapshot
- name: name_group_snap
- description: description_group_snap
- group_id: group_id
Request Example
---------------
.. literalinclude:: ./samples/group-snapshots-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_snapshot: group_snapshot
- id: group_snapshot_id_req
- name: name_group_snap_req
- group_type_id: group_type_id
Response Example
----------------
.. literalinclude:: ./samples/group-snapshots-create-response.json
:language: javascript
Reset group snapshot status
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
POST /v3/{project_id}/group_snapshots/{group_snapshot_id}/action
Resets the status for a group snapshot. Specifies the ``reset_status`` action
in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_snapshot_id: group_snapshot_id_path
- reset_status: reset_status
- status: status_group_snap
Request Example
---------------
.. literalinclude:: ./samples/group-snapshot-reset-status-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/group-type-specs.inc 0000664 0000000 0000000 00000007676 15131732575 0025504 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Group type specs (group_types, group_specs)
===========================================
Create or update group specs for a group type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/group_types/{group_type_id}/group_specs
Creates group specs for a group type. If the specification key already exists in
the group specs, this API will update the specification as well.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type_id: group_type_id_path
- group_specs: group_specs_req
Request Example
---------------
.. literalinclude:: ./samples/group-type-specs-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_specs: group_specs_req
Response Example
----------------
.. literalinclude:: ./samples/group-type-specs-create-response.json
:language: javascript
List group specs for a group type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/group_types/{group_type_id}/group_specs
Lists all the group specs for a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type_id: group_type_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_specs: group_specs_req
Response Example
----------------
.. literalinclude:: ./samples/group-type-specs-list-response.json
:language: javascript
Show one specific group spec for a group type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id}
Shows a group spec for a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type_id: group_type_id_path
- spec_id: spec_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- spec: spec_value
Response Example
----------------
.. literalinclude:: ./samples/group-type-specs-show-response.json
:language: javascript
Update one specific group spec for a group type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
PUT /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id}
Updates a group spec for a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type_id: group_type_id_path
- spec_id: spec_id
- spec: spec_value
Request Example
---------------
.. literalinclude:: ./samples/group-type-specs-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- spec: spec_value
Response Example
----------------
.. literalinclude:: ./samples/group-type-specs-update-response.json
:language: javascript
Delete one specific group spec for a group type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
DELETE /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id}
Deletes a group spec for a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type_id: group_type_id_path
- spec_id: spec_id
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/group-types.inc 0000664 0000000 0000000 00000012130 15131732575 0024531 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Group types (group_types)
=========================
To create a generic volume group, you must specify a group type.
Update group type
~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/group_types/{group_type_id}
Updates a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
- 409
- 500
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type_id: group_type_id_path
- group_type: group_type
- name: name_group
- description: description_group_type_optional
- is_public: is_public_group_type_optional
Request Example
---------------
.. literalinclude:: ./samples/group-type-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_type: group_type
- id: group_type_id
- is_public: is_public_group_type_required
- group_specs: group_specs
- description: description_group_type_required
- name: name_group_type
Response Example
----------------
.. literalinclude:: ./samples/group-type-show-response.json
:language: javascript
Show group type details
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/group_types/{group_type_id}
Shows details for a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type_id: group_type_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_type: group_type
- id: group_type_id
- name: name_group_type
- is_public: is_public_group_type_required
- group_specs: group_specs
- description: description_group_type_required
Response Example
----------------
.. literalinclude:: ./samples/group-type-show-response.json
:language: javascript
Show default group type details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/group_types/default
Shows details for the default group type if configured.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_type: group_type
- id: group_type_id
- name: name_group_type
- is_public: is_public_group_type_required
- group_specs: group_specs
- description: description_group_type_required
Response Example
----------------
.. literalinclude:: ./samples/group-type-default-response.json
:language: javascript
Delete group type
~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/group_types/{group_type_id}
Deletes a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- group_type_id: group_type_id_path
- project_id: project_id_path
List group types
~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/group_types
Lists group types.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_types: group_types
- id: group_type_id
- group_specs: group_specs
- name: name_group_type
- is_public: is_public_group_type_required
- description: description_group_type_required
Response Example
----------------
.. literalinclude:: ./samples/group-types-list-response.json
:language: javascript
Create group type
~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/group_types
Creates a group type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
- 409
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_type: group_type
- name: name_group_type
- description: description_group_type_optional
- is_public: is_public_group_type_optional
- group_specs: group_specs
Request Example
---------------
.. literalinclude:: ./samples/group-type-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group_type: group_type
- id: group_type_id
- is_public: is_public_group_type_required
- group_specs: group_specs
- description: description_group_type_required
- name: name_group_type
Response Example
----------------
.. literalinclude:: ./samples/group-type-show-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/groups.inc 0000664 0000000 0000000 00000016703 15131732575 0023564 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Generic volume groups (groups)
==============================
Generic volume groups enable you to create a group of volumes and
manage them together.
How are generic volume groups different from consistency groups?
Currently consistency groups in cinder only support consistent group
snapshot. It cannot be extended easily to serve other purposes. A project
may want to put volumes used in the same application together in a group
so that it is easier to manage them together, and this group of volumes
may or may not support consistent group snapshot. Generic volume group
is introduced to solve this problem. By decoupling the tight relationship
between the group construct and the consistency concept, generic volume
groups can be extended to support other features in the future.
List groups
~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/groups
Lists groups. Since v3.31, if non-admin users specify
invalid filters in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- groups: groups
- id: id
- name: group_name
Response Example
----------------
.. literalinclude:: ./samples/groups-list-response.json
:language: javascript
Create group
~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups
Creates a group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group: group
- description: description_group_false
- availability_zone: availability_zone
- group_type: group_type_id
- volume_types: volume_types
- name: group_name
Request Example
---------------
.. literalinclude:: ./samples/group-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- id: group_id_path
- name: group_name
Response Example
----------------
.. literalinclude:: ./samples/group-create-response.json
:language: javascript
Show group details
~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/groups/{group_id}
Shows details for a group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
- list_volume: list_volume
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- group: group
- status: status_group
- description: description_group_true
- availability_zone: availability_zone
- created_at: created_at
- group_type: group_type_id
- group_snapshot_id: group_snapshot_id
- source_group_id: source_group_id
- volume_types: volume_types
- id: id
- name: group_name
- volumes: volume_ids
- replication_status: group_replication_status
- project_id: project_id_group
Response Example
----------------
.. literalinclude:: ./samples/group-show-response.json
:language: javascript
Create group from source
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups/action
Creates a group from source.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- create-from-src: create-from-src
- description: description_group_false
- group_snapshot_id: group_snapshot_id_req
- source_group_id: source_group_id_req
- name: group_name
Request Example
---------------
.. literalinclude:: ./samples/group-create-from-src-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- id: group_id_path
- name: group_name
Response Example
----------------
.. literalinclude:: ./samples/group-create-from-src-response.json
:language: javascript
Delete group
~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action
Deletes a group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
- delete: delete
- delete-volumes: delete-volumes
Request Example
---------------
.. literalinclude:: ./samples/group-delete-request.json
:language: javascript
List groups with details
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/groups/detail
Lists groups with details. Since v3.31, if non-admin
users specify invalid filters in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
- list_volume: list_volume
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- groups: groups
- status: status_group
- description: description_group_true
- availability_zone: availability_zone
- created_at: created_at
- group_type: group_type_id
- group_snapshot_id: group_snapshot_id
- source_group_id: source_group_id
- volume_types: volume_types
- id: group_id_path
- name: name
- volumes: volume_ids
- replication_status: group_replication_status
- project_id: project_id_group
Response Example
----------------
.. literalinclude:: ./samples/groups-list-detailed-response.json
:language: javascript
Update group
~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/groups/{group_id}
Updates a group.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
- group: group
- remove_volumes: remove_volumes
- description: description_group_false
- add_volumes: add_volumes
- name: group_name
Request Example
---------------
.. literalinclude:: ./samples/group-update-request.json
:language: javascript
Reset group status
~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/groups/{group_id}/action
Resets the status for a group. Specify the ``reset_status`` action in the
request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- group_id: group_id_path
- reset_status: reset_status
- status: status
Request Example
---------------
.. literalinclude:: ./samples/group-reset-status-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/hosts.inc 0000664 0000000 0000000 00000003776 15131732575 0023413 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Hosts extension (os-hosts)
==========================
Administrators only, depending on policy settings.
Lists and shows hosts.
List all hosts for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{admin_project_id}/os-hosts
Lists summary info for all hosts that are not disabled.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- hosts: hosts
- service-status: host_service_status
- service: host_service
- zone: availability_zone_required
- service-state: service_state
- host_name: host_name_backend
- last-update: updated_at
Response Example
----------------
.. literalinclude:: ./samples/hosts-list-response.json
:language: javascript
Show Host Details for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{admin_project_id}/os-hosts/{host_name}
Shows volume and snapshot details for a cinder-volume host.
*Note:* This API is meant specifically for cinder-volume hosts only.
It is not valid against other Cinder service hosts or hosts where the
cinder-volume service has been disabled.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- host_name: hostname
Response
--------
.. rest_parameters:: parameters.yaml
- host: host
- volume_count: total_count
- total_volume_gb: totalGigabytesUsedStr
- total_snapshot_gb: totalSnapGigabytesUsed
- project: project_id_host
- host: host_name_backend
- snapshot_count: totalSnapshotsUsed
Response Example
----------------
.. literalinclude:: ./samples/hosts-get-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/index.rst 0000664 0000000 0000000 00000005473 15131732575 0023415 0 ustar 00root root 0000000 0000000 :tocdepth: 2
==============================
Block Storage API V3 (CURRENT)
==============================
.. note::
The URL for most API methods includes a ``{project_id}`` placeholder that
represents the caller's project ID. As of v3.67, the project_id is optional
in the URL, and the following are equivalent:
* ``GET /v3/{project_id}/volumes``
* ``GET /v3/volumes``
In both instances, the actual project ID used by the API method is the one
in the caller's keystone context. For that reason, including a project ID in
the URL is redundant.
The v3.67 microversion is only used as an indicator that the API accepts a
URL without a ``{project_id}`` segment, and this applies to all requests
regardless of the microversion in the request. For example, an API node
serving v3.67 or greater will accept a URL without a ``{project_id}``
segment even if the request asks for v3.0. Likewise, it will accept a URL
containing a ``{project_id}`` segment even if the request asks for v3.67.
.. rest_expand_all::
.. First thing we want to see is the version discovery document.
.. include:: api-versions.inc
.. include:: volumes-v3-versions.inc
.. Next top-level thing could be listing extensions available on this endpoint.
.. include:: volumes-v3-extensions.inc
.. To create a volume, I might need a volume type, so list those next.
.. include:: volumes-v3-types.inc
.. include:: volume-type-access.inc
.. include:: default-types.inc
.. Now my primary focus is on volumes and what I can do with them.
.. include:: volumes-v3-volumes.inc
.. include:: volumes-v3-volumes-actions.inc
.. List the other random volume APIs in just alphabetical order.
.. include:: volume-manage.inc
.. include:: volumes-v3-snapshots.inc
.. include:: volumes-v3-snapshots-actions.inc
.. include:: snapshot-manage.inc
.. include:: os-vol-transfer-v3.inc
.. include:: vol-transfer-v3.inc
.. Now the other random things in alphabetical order.
.. include:: attachments.inc
.. include:: availability-zones-v3.inc
.. include:: os-vol-pool-v3.inc
.. include:: ext-backups.inc
.. include:: ext-backups-actions-v3.inc
.. include:: capabilities-v3.inc
.. include:: clusters.inc
.. include:: consistencygroups-v3.inc
.. include:: os-cgsnapshots-v3.inc
.. include:: os-services.inc
.. include:: groups.inc
.. include:: group-replication.inc
.. include:: group-snapshots.inc
.. include:: group-types.inc
.. include:: group-type-specs.inc
.. include:: hosts.inc
.. include:: limits.inc
.. include:: messages.inc
.. include:: resource-filters.inc
.. include:: qos-specs-v3-qos-specs.inc
.. quota-sets should arguably live closer to limits, but that would mess up
our nice alphabetical ordering
.. include:: quota-classes.inc
.. include:: quota-sets.inc
.. include:: worker-cleanup.inc
.. valid values for boolean parameters.
.. include:: valid-boolean-values.inc
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/limits.inc 0000664 0000000 0000000 00000002541 15131732575 0023541 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Limits (limits)
===============
Shows absolute limits for a project.
An absolute limit value of ``-1`` indicates that the absolute limit
for the item is infinite.
Show absolute limits for project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/limits
Shows absolute limits for a project.
An absolute limit value of ``-1`` indicates that the absolute limit
for the item is infinite.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- limits: limits
- rate: rate
- absolute: absolute
- totalSnapshotsUsed: totalSnapshotsUsed_int
- maxTotalBackups: maxTotalBackups
- maxTotalVolumeGigabytes: maxTotalVolumeGigabytes
- maxTotalSnapshots: maxTotalSnapshots
- maxTotalBackupGigabytes: maxTotalBackupGigabytes
- totalBackupGigabytesUsed: totalBackupGigabytesUsed
- maxTotalVolumes: maxTotalVolumes
- totalVolumesUsed: totalVolumesUsed
- totalBackupsUsed: totalBackupsUsed
- totalGigabytesUsed: totalGigabytesUsed
Response Example
----------------
.. literalinclude:: ./samples/limits/limits-show-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/messages.inc 0000664 0000000 0000000 00000005076 15131732575 0024055 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Messages (messages)
===================
Lists all, shows, and deletes messages. These are error messages generated by
failed operations as a way to find out what happened when an asynchronous
operation failed.
Delete message
~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/messages/{message_id}
Deletes a message.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 204
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- message_id: message_id
Show message details
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/messages/{message_id}
Shows details for a message.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- message_id: message_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- message: user_message
- request_id: request_id
- links: links_message
- message_level: message_level
- event_id: event_id
- created_at: created_at
- guaranteed_until: guaranteed_until
- resource_uuid: resource_uuid
- id: id_message
- resource_type: resource_type
- user_message: user_message
Response Example
----------------
.. literalinclude:: ./samples/messages-show-response.json
:language: javascript
List messages
~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/messages
Lists all messages. Since v3.31, if non-admin users
specify invalid filters in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- messages: user_messages
- request_id: request_id
- links: links_message
- message_level: message_level
- event_id: event_id
- created_at: created_at
- guaranteed_until: guaranteed_until
- resource_uuid: resource_uuid
- id: id_message
- resource_type: resource_type
- user_message: user_message
Response Example
----------------
.. literalinclude:: ./samples/messages-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/os-cgsnapshots-v3.inc 0000664 0000000 0000000 00000007336 15131732575 0025550 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Consistency group snapshots (DEPRECATED)
========================================
Lists all, lists all with details, shows details for, creates, and
deletes consistency group snapshots.
Delete a consistency group snapshot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/cgsnapshots/{cgsnapshot_id}
Deletes a consistency group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- cgsnapshot_id: cgsnapshot_id_path
Show consistency group snapshot detail
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/cgsnapshots/{cgsnapshot_id}
Shows details for a consistency group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- cgsnapshot_id: cgsnapshot_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- cgsnapshot: cgsnapshot
- status: status
- description: description_cg_snapshot_true
- created_at: created_at
- consistencygroup_id: consistencygroup_id_required
- id: id
- name: name_cgsnap
Response Example
----------------
.. literalinclude:: ./samples/cgsnapshots-show-response.json
:language: javascript
List all consistency group snapshots with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/cgsnapshots/detail
Lists all consistency group snapshots with details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- cgsnapshots: cgsnapshots
- status: status
- description: description_cg_snapshot_true
- created_at: created_at
- consistencygroup_id: consistencygroup_id_required
- id: id
- name: name_cgsnap
Response Example
----------------
.. literalinclude:: ./samples/cgsnapshots-list-detailed-response.json
:language: javascript
List all consistency group snapshots
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/cgsnapshots
Lists all consistency group snapshots.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- cgsnapshots: cgsnapshots
- id: id
- name: name_cgsnap
Response Example
----------------
.. literalinclude:: ./samples/cgsnapshots-list-response.json
:language: javascript
Create a consistency group snapshot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/cgsnapshots
Creates a consistency group snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- cgsnapshot: cgsnapshot
- name: name_snap
- consistencygroup_id: consistencygroup_id_required
- description: description_cg_snapshot_false
Request Example
---------------
.. literalinclude:: ./samples/cgsnapshots-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status
- description: description_cg_snapshot_true
- created_at: created_at
- consistencygroup_id: consistencygroup_id_required
- id: id
- name: name_cgsnap
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/os-services.inc 0000664 0000000 0000000 00000020124 15131732575 0024477 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Services (os-services)
======================
Administrator only. Lists all Cinder services, enables or disables
a Cinder service, freeze or thaw the specified cinder-volume host,
failover a replicating cinder-volume host.
List All Cinder Services
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/os-services
Lists all Cinder services. Provides details why any services
were disabled.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- host: host_query
- binary: service_binary_query
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- services: services
- binary: binary_required
- disabled_reason: disabled_reason_body_req
- host: host_name_body_req
- state: service_state_up_down
- status: service_status
- frozen: frozen
- updated_at: updated
- zone: availability_zone_required
- cluster: cluster_cvol
- replication_status: replication_status_cvol
- active_backend_id: active_backend_id
- backend_state: backend_state
Response Example
----------------
.. literalinclude:: ./samples/services-list-response.json
:language: javascript
Disable a Cinder Service
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/disable
Disables a Cinder service. Specify the service by its host name
and binary name.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- host: host_name_body_req
- binary: binary_required
Request Example
---------------
.. literalinclude:: ./samples/services-disable-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- disabled: disabled_required
- status: service_status
- host: host_name_body_req
- service: service_key
- binary: binary_required
Response Example
----------------
.. literalinclude:: ./samples/services-disable-response.json
:language: javascript
Log Disabled Cinder Service Information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/disable-log-reason
Logs information to the Cinder service table about why a Cinder service was
disabled.
Specify the service by its host name and binary name.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- binary: binary_required
- host: host_name_body_req
- disabled_reason: disabled_reason_body
Request Example
---------------
.. literalinclude:: ./samples/services-disable-log-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- disabled: disabled_required
- status: service_status
- host: host_name_body_req
- service: service_key
- binary: binary_required
- disabled_reason: disabled_reason_body_req
Response Example
----------------
.. literalinclude:: ./samples/services-disable-log-response.json
:language: javascript
Enable a Cinder Service
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/enable
Enables a Cinder service. Specify the service by its host name
and binary name.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- binary: binary_required
- host: host_name_body_req
Request Example
---------------
.. literalinclude:: ./samples/services-enable-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- disabled: disabled_required
- status: service_status
- host: host_name_body_req
- service: service_key
- binary: binary_required
- disabled_reason: disabled_reason_body_req
Response Example
----------------
.. literalinclude:: ./samples/services-enable-response.json
:language: javascript
Get Current Log Levels for Cinder Services
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/get-log
Get current log levels for services, supported since v3.32. Filter the
services by binary, server name and prefix for the log path.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- binary: binary
- server: host_name_body
- prefix: prefix
Request Example
---------------
.. literalinclude:: ./samples/services-get-log-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- log_levels: log_levels
- binary: binary_required
- host: host_name_body_req
- levels: levels
Response Example
----------------
.. literalinclude:: ./samples/services-get-log-response.json
:language: javascript
Set Log Levels of Cinder Services Dynamically
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/set-log
Set log levels of services dynamically, supported since v3.32. Filter the
services by binary, server name and prefix for the log path.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- binary: binary
- server: host_name_body
- prefix: prefix
- levels: levels_set
Request Example
---------------
.. literalinclude:: ./samples/services-set-log-request.json
:language: javascript
Freeze a Cinder Backend Host
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/freeze
Freeze and disable the specified cinder-volume host, and set
``Disabled Reason`` of Cinder service table to ``frozen``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- host: host_name_body_req
Request Example
---------------
.. literalinclude:: ./samples/services-freeze-request.json
:language: javascript
Thaw a Cinder Backend Host
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/thaw
Thaw and enable the specified cinder-volume host, and clean
``Disabled Reason`` of Cinder service table.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- host: host_name_body_req
Request Example
---------------
.. literalinclude:: ./samples/services-thaw-request.json
:language: javascript
Failover a Cinder Backend Host
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/os-services/failover_host
Failover a replicating cinder-volume host. Since Cinder Volume API Version
3.26, you can use ``failover`` in request URL instead of ``failover_host``,
and the cluster name in request body is supported.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- host: host_name_body_req
- backend_id: backend_id
- cluster: cluster_cvol
Request Example
---------------
.. literalinclude:: ./samples/services-failover-host-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/os-vol-pool-v3.inc 0000664 0000000 0000000 00000002343 15131732575 0024754 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Back-end storage pools
======================
Administrator only. Lists all back-end storage pools that are known
to the scheduler service.
List all back-end storage pools
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/scheduler-stats/get_pools
Lists all back-end storage pools. Since v3.31, if non-admin users
specify invalid filters in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- detail: detail
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- pools: pools
- updated: updated
- QoS_support: QoS_support
- name: name_backend_pool
- total_capacity_gb: total_capacity
- volume_backend_name: volume_backend_name
- capabilities: capabilities
- free_capacity_gb: free_capacity
- driver_version: driver_version
- reserved_percentage: reserved_percentage
- storage_protocol: storage_protocol
Response Example
----------------
.. literalinclude:: ./samples/pools-list-detailed-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/os-vol-transfer-v3.inc 0000664 0000000 0000000 00000010673 15131732575 0025634 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume transfer
===============
Transfers a volume from one user to another user.
Accept a volume transfer
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/os-volume-transfer/{transfer_id}/accept
Accepts a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer_id: transfer_id
- auth_key: auth_key
Request Example
---------------
.. literalinclude:: ./samples/volume_transfer/volume-transfer-accept-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- transfer: transfer
- volume_id: volume_id
- id: transfer_obj_id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume_transfer/volume-transfer-accept-response.json
:language: javascript
Create a volume transfer
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/os-volume-transfer
Creates a volume transfer.
**Preconditions**
* The volume ``status`` must be ``available``
* Transferring encrypted volumes is not supported
* If the volume has snapshots, those snapshots must be ``available``
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer: transfer
- name: name
- volume_id: volume_id
Request Example
---------------
.. literalinclude:: ./samples/volume_transfer/volume-transfer-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- auth_key: auth_key
- links: links
- created_at: created_at
- volume_id: volume_id
- id: transfer_obj_id
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume_transfer/volume-transfer-create-response.json
:language: javascript
List volume transfers for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/os-volume-transfer
Lists volume transfers.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id
- id: transfer_obj_id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume_transfer/volume-transfers-list-response.json
:language: javascript
Show volume transfer detail
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/os-volume-transfer/{transfer_id}
Shows details for a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer_id: transfer_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- created_at: created_at
- volume_id: volume_id
- id: transfer_obj_id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume_transfer/volume-transfer-show-response.json
:language: javascript
Delete a volume transfer
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/os-volume-transfer/{transfer_id}
Deletes a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer_id: transfer_id
List volume transfers and details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/os-volume-transfer/detail
Lists volume transfers, with details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- transfers: transfers
- created_at: created_at
- volume_id: volume_id
- id: transfer_obj_id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume_transfer/volume-transfers-list-detailed-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/parameters.yaml 0000664 0000000 0000000 00000246011 15131732575 0024576 0 ustar 00root root 0000000 0000000 # variables in header
x-openstack-request-id:
description: >
The unique ID of the API request, returned in the response headers
for request tracking.
in: header
required: false
type: string
# variables in path
admin_project_id:
description: |
The UUID of the administrative project.
in: path
required: true
type: string
attachment_id_path:
description: |
The ID of the attachment.
in: path
required: true
type: string
backup_id_required:
description: |
The UUID for a backup.
in: path
required: true
type: string
cgsnapshot_id_path:
description: |
The ID of the consistency group snapshot.
in: path
required: true
type: string
cluster_name_path:
description: |
The name of the cluster.
in: path
required: true
type: string
consistencygroup_id_path:
description: |
The ID of the consistency group.
in: path
required: true
type: string
encryption_id:
description: |
The ID of the encryption type.
in: path
required: true
type: string
group_id_path:
description: |
The ID of the group.
in: path
required: true
type: string
group_snapshot_id_path:
description: |
The ID of the group snapshot.
in: path
required: true
type: string
group_type_id_path:
description: |
The UUID for an existing group type.
in: path
required: true
type: string
hostname:
description: |
The name of the host that hosts the storage back
end.
in: path
required: true
type: string
init_at:
description: |
The date and time when the resource was initiated.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
in: path
required: true
type: string
key_encrypt_spec:
description: |
The key name of the encryption spec for a volume type.
in: path
required: true
type: string
key_extra_spec:
description: |
The key name of the extra spec for a volume type.
in: path
required: true
type: string
key_path:
description: |
The metadata key name for the metadata that you
want to remove.
in: path
required: true
type: string
key_update:
description: |
The metadata key name for the metadata that you
want to update.
in: path
required: true
type: string
key_view:
description: |
The metadata key name for the metadata that you
want to see.
in: path
required: true
type: string
list_volume:
description: |
Show volume ids in this group. Default is False.
in: path
required: false
type: string
min_version: 3.25
message_id:
description: |
The UUID of the message.
in: path
required: true
type: string
project_id_path:
description: |
The UUID of the project in a multi-tenancy cloud.
in: path
required: true
type: string
qos_id:
description: |
The ID of the QoS specification.
in: path
required: true
type: string
quota_class_name:
description:
The name of the quota class for which to set quotas.
in: path
required: true
type: string
quotas_project_id:
description: |
The UUID of the project in a multi-tenancy cloud.
in: path
required: true
type: string
snapshot_id_path:
description: |
The UUID of the snapshot.
in: path
required: true
type: string
spec_id:
description: |
The id (key) of the group specification.
in: path
required: true
type: string
transfer_id:
description: |
The unique identifier for a volume transfer.
in: path
required: true
type: string
volume_id_path:
description: |
The UUID of the volume.
in: path
required: true
type: string
volume_type_id:
description: |
The UUID for an existing volume type.
in: path
required: true
type: string
volume_type_name_or_id:
description: |
The name or UUID for an existing volume type.
in: path
required: true
type: string
# variables in query
action:
description: |
The action. Valid values are "set" or "unset."
in: query
required: true
type: string
all-tenants:
description: |
Shows details for all projects. Admin only.
in: query
required: false
type: string
bootable_query:
description: |
Filters results by bootable status. Default=None.
in: query
required: false
type: boolean
cascade:
description: |
Remove any snapshots along with the volume. Default=False.
in: query
required: false
type: boolean
cluster_active_backend_id_query:
description: |
Filter the cluster list result by the ID of the active backend.
in: query
required: false
type: string
cluster_binary_query:
description: |
Filter the cluster list result by binary name of the clustered services.
One of ``cinder-api``, ``cinder-scheduler``, ``cinder-volume``
or ``cinder-backup``.
in: query
required: false
type: string
cluster_disabled_query:
description: |
Filter the cluster list result by status.
in: query
required: false
type: boolean
cluster_frozen_query:
description: |
Filter the cluster list result by whether it's frozen.
in: query
required: false
type: boolean
cluster_is_up_query:
description: |
Filter the cluster list result by state.
in: query
required: false
type: boolean
cluster_name_query:
description: |
Filter the cluster list result by cluster name.
in: query
required: false
type: string
cluster_num_down_hosts_query:
description: |
Filter the cluster list result by number of down hosts.
in: query
required: false
type: integer
cluster_num_hosts_query:
description: |
Filter the cluster list result by number of hosts.
in: query
required: false
type: integer
cluster_replication_status_query:
description: |
Filter the cluster list result by replication status. One of: ``enabled``,
``disabled``.
in: query
required: false
type: string
detail:
description: |
Indicates whether to show pool details or only
pool names in the response. Set to ``true`` to show pool details.
Set to ``false`` to show only pool names. Default is ``false``.
in: query
required: false
type: boolean
filter_consumes_quota:
description: |
Filters results by ``consumes_quota`` field. Resources that don't use
quotas are usually temporary internal resources created to perform an
operation. Default is to not filter by it. Filtering by this option may
not be always possible in a cloud, see
:ref:`List Resource Filters <resource_filters>` to determine whether this
filter is available in your cloud.
in: query
required: false
type: boolean
min_version: 3.65
filter_created_at:
description: |
Filters results by a time that resources are created at with time
comparison operators: gt/gte/eq/neq/lt/lte.
The date and time stamp format is ISO 8601: CCYY-MM-DDThh:mm:ss±hh:mm.
The ±hh:mm value, if included, returns the time zone as an offset from UTC.
in: query
required: false
type: string
min_version: 3.60
filter_updated_at:
description: |
Filters results by a time that resources are updated at with time
comparison operators: gt/gte/eq/neq/lt/lte.
The date and time stamp format is ISO 8601: CCYY-MM-DDThh:mm:ss±hh:mm.
The ±hh:mm value, if included, returns the time zone as an offset from UTC.
in: query
required: false
type: string
min_version: 3.60
force_del_qos:
description: |
To delete a QoS specification even if it is in-use,
set to ``true``. Default is ``false``.
in: query
required: false
type: boolean
force_vol_del:
description: |
Indicates whether to force delete a volume even if
the volume is in deleting or error_deleting. Default is ``false``.
in: query
required: false
type: boolean
min_version: 3.23
host_query:
description: |
Filter the service list result by host name of the service.
in: query
required: false
type: string
image-id:
description: |
Creates volume from image ID. Default=None.
in: query
required: false
type: string
instance_uuid_query_optional:
description: |
The UUID of the attaching instance.
in: query
required: false
type: string
is_public_volume_type_query:
description: |
Filter the volume type by public visibility.
See :ref:`valid boolean values <valid-boolean-values>`.
in: query
required: false
type: boolean
limit:
description: |
Requests a page size of items. Returns a number
of items up to a limit value. Use the ``limit`` parameter to make
an initial limited request and use the ID of the last-seen item
from the response as the ``marker`` parameter value in a
subsequent limited request.
in: query
required: false
type: integer
limit_group_snapshot:
description: |
Requests a page size of items. Returns a number
of items up to a limit value. Use the ``limit`` parameter to make
an initial limited request and use the ID of the last-seen item
from the response as the ``marker`` parameter value in a
subsequent limited request.
in: query
required: false
type: integer
min_version: 3.29
limit_transfer:
description: |
Requests a page size of items. Returns a number
of items up to a limit value. Use the ``limit`` parameter to make
an initial limited request and use the ID of the last-seen item
from the response as the ``marker`` parameter value in a
subsequent limited request.
in: query
required: false
type: integer
min_version: 3.59
marker:
description: |
The ID of the last-seen item. Use the ``limit``
parameter to make an initial limited request and use the ID of the
last-seen item from the response as the ``marker`` parameter value
in a subsequent limited request.
in: query
required: false
type: string
marker_group_snapshot:
description: |
The ID of the last-seen item. Use the ``limit``
parameter to make an initial limited request and use the ID of the
last-seen item from the response as the ``marker`` parameter value
in a subsequent limited request.
in: query
required: false
type: string
min_version: 3.29
marker_transfer:
description: |
The ID of the last-seen item. Use the ``limit``
parameter to make an initial limited request and use the ID of the
last-seen item from the response as the ``marker`` parameter value
in a subsequent limited request.
in: query
required: false
type: string
min_version: 3.59
metadata_query:
description: |
One or more metadata key and value pairs as a
url encoded dictionary of strings.
in: query
required: false
type: object
migration_status_query:
description: |
Filters results by a migration status. Default=None.
Admin only.
in: query
required: false
name_volume:
description: |
Filters results by a name. Default=None.
in: query
required: false
type: string
offset:
description: |
Used in conjunction with ``limit`` to return a slice of items. ``offset``
is where to start in the list.
in: query
required: false
type: integer
offset_group_snapshot:
description: |
Used in conjunction with ``limit`` to return a slice of items. ``offset``
is where to start in the list.
in: query
required: false
type: integer
min_version: 3.29
offset_transfer:
description: |
Used in conjunction with ``limit`` to return a slice of items. ``offset``
is where to start in the list.
in: query
required: false
type: integer
min_version: 3.59
resource:
description: |
Filter for resource name.
in: query
required: false
type: string
service_binary_query:
description: |
Filter the service list result by binary name of the service.
in: query
required: false
type: string
sort:
description: |
Comma-separated list of sort keys and optional
sort directions in the form of ``< key > [: < direction > ]``.
A valid direction is ``asc`` (ascending) or ``desc`` (descending).
in: query
required: false
type: string
sort_dir:
description: |
Sorts by one or more sets of attribute and sort
direction combinations. If you omit the sort direction in a set,
default is ``desc``.
Deprecated in favour of the combined ``sort`` parameter.
in: query
required: false
type: string
sort_dir_group_snapshot:
description: |
Sorts by one or more sets of attribute and sort
direction combinations. If you omit the sort direction in a set,
default is ``desc``.
Deprecated in favour of the combined ``sort`` parameter.
in: query
required: false
type: string
min_version: 3.29
sort_dir_transfer:
description: |
Sorts by one or more sets of attribute and sort
direction combinations. If you omit the sort direction in a set,
default is ``desc``.
Deprecated in favour of the combined ``sort`` parameter.
in: query
required: false
type: string
min_version: 3.59
sort_group_snapshot:
description: |
Comma-separated list of sort keys and optional
sort directions in the form of ``< key > [: < direction > ]``.
A valid direction is ``asc`` (ascending) or ``desc`` (descending).
in: query
required: false
type: string
min_version: 3.29
sort_key:
description: |
Sorts by an attribute. A valid value is ``name``,
``status``, ``container_format``, ``disk_format``, ``size``,
``id``, ``created_at``, or ``updated_at``. Default is
``created_at``. The API uses the natural sorting direction of the
``sort_key`` attribute value.
Deprecated in favour of the combined ``sort`` parameter.
in: query
required: false
type: string
sort_key_group_snapshot:
description: |
Sorts by an attribute. A valid value is ``name``,
``status``, ``group_id``, ``group_type_id``, ``size``,
``id``, ``created_at``, or ``updated_at``. Default is
``created_at``. The API uses the natural sorting direction of the
``sort_key`` attribute value.
Deprecated in favour of the combined ``sort`` parameter.
in: query
required: false
type: string
min_version: 3.29
sort_key_transfer:
description: |
Sorts by an attribute. Default is
``created_at``. The API uses the natural sorting direction of the
``sort_key`` attribute value.
Deprecated in favour of the combined ``sort`` parameter.
in: query
required: false
type: string
min_version: 3.59
sort_transfer:
description: |
Comma-separated list of sort keys and optional
sort directions in the form of ``< key > [: < direction > ]``.
A valid direction is ``asc`` (ascending) or ``desc`` (descending).
in: query
required: false
type: string
min_version: 3.59
status_query:
description: |
Filters results by a status. Default=None.
in: query
required: false
type: boolean
usage:
description: |
Show project's quota usage information.
Default is ``false``.
in: query
required: false
type: boolean
vol_type_id_query:
description: |
A volume type ID.
in: query
required: true
type: string
volume_id_attachment_query_optional:
description: |
The UUID of the volume which the attachment belongs
to.
in: query
required: false
type: string
with_count:
description: |
Whether to show ``count`` in API response or not, default is ``False``.
in: query
required: false
type: boolean
min_version: 3.45
# variables in body
absolute:
description: |
An ``absolute`` limits object.
in: body
required: true
type: object
accepted:
description: |
Records if this transfer was accepted or not.
in: body
required: false
type: boolean
min_version: 3.57
active_backend_id:
description: |
The ID of active storage backend. Only in ``cinder-volume`` service.
in: body
required: false
type: string
add_project_access:
description: |
Adds volume type access to a project.
in: body
required: true
type: object
add_volumes:
description: |
One or more volume UUIDs, separated by commas, to
add to the volume group or consistency group.
in: body
required: false
type: string
alias:
description: |
The alias for the extension. For example,
"FOXNSOX", "os-availability-zone", "os-extended-quotas",
"os-share-unmanage" or "os-used-limits."
in: body
required: true
type: string
allow_attached_volume:
description: |
Whether to allow failover if any volumes are 'in-use'.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: true
type: boolean
attach_mode:
description: |
The attach mode of attachment, acceptable values are
read-only ('ro') and read-and-write ('rw').
in: body
required: false
type: string
min_version: 3.54
attach_mode_required:
description: |
The attach mode of attachment, read-only ('ro') or
read-and-write ('rw'), default is 'rw'.
in: body
required: true
type: string
attach_status:
description: |
The volume attach status.
in: body
required: false
type: string
attached_at:
description: |
The time when attachment is attached.
in: body
required: true
type: string
attachment:
description: |
An attachment object.
in: body
required: true
type: object
attachment_id:
description: |
The ID of the attachment.
in: body
required: false
type: string
attachment_id_required:
description: |
The ID of attachment.
in: body
required: true
type: string
attachments:
description: |
Instance attachment information. If this volume
is attached to a server instance, the attachments list includes
the UUID of the attached server, an attachment UUID, the name of
the attached host, if any, the volume UUID, the device, and the
device UUID. Otherwise, this list is empty. For example::
[
{
'server_id': '6c8cf6e0-4c8f-442f-9196-9679737feec6',
'attachment_id': '3dafcac4-1cb9-4b60-a227-d729baa10cf6',
'attached_at': '2019-09-30T19:30:34.000000',
'host_name': null,
'volume_id': '5d95d5ee-4bdd-4452-b9d7-d44ca10d3d53',
'device': '/dev/vda',
'id': '5d95d5ee-4bdd-4452-b9d7-d44ca10d3d53'
}
]
in: body
required: true
type: array
auth_key:
description: |
The authentication key for the volume transfer.
in: body
required: true
type: string
availability_zone:
description: |
The name of the availability zone.
in: body
required: false
type: string
availability_zone_backup:
description: |
The backup availability zone key value pair.
in: body
required: false
type: string
min_version: 3.51
availability_zone_info:
description: |
The list of availability zone information.
in: body
required: true
type: array
availability_zone_required:
description: |
The availability zone name.
in: body
required: true
type: string
availability_zone_state:
description: |
The current state of the availability zone.
in: body
required: true
type: object
available:
description: |
Whether the availability zone is available for use.
in: body
required: true
type: boolean
backend_id:
description: |
ID of backend to failover to. Default is ``None``.
in: body
required: false
type: string
backend_id_target:
description: |
ID of failover target backend.
in: body
required: true
type: string
backend_state:
description: |
The state of storage backend. Only in ``cinder-volume`` service.
in: body
required: false
type: string
backup:
description: |
A ``backup`` object.
in: body
required: true
type: object
backup_gigabytes:
description: |
The size (GB) of backups that are allowed for each project.
in: body
required: true
type: integer
backup_gigabytes_usage:
description: |
The size (GB) usage information of backup for this project, including ``in_use``,
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
backup_id:
description: |
The UUID of the backup.
in: body
required: false
type: string
min_version: 3.47
backup_links_optional:
description: |
An array containing an object with the following fields:
``"rel"`` with the value ``"next"`` and ``href``,
whose value is a link to the next page of backups.
Only appears when there are more backups than are
listed in the current response.
in: body
required: false
type: array
backup_record:
description: |
An object recording volume backup metadata, including ``backup_service``
and ``backup_url``.
in: body
required: true
type: object
backup_service:
description: |
The service used to perform the backup.
in: body
required: true
type: string
backup_url:
description: |
An identifier string to locate the backup.
in: body
required: true
type: string
backups:
description: |
A list of ``backup`` objects.
in: body
required: true
type: array
backups_number:
description: |
The number of backups that are allowed for each project.
in: body
required: true
type: integer
backups_number_usage:
description: |
The backup usage information for this project, including ``in_use``,
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
binary:
description: |
The binary name of the service.
in: body
required: false
type: string
binary_required:
description: |
The binary name of the service.
in: body
required: true
type: string
bootable:
description: |
Enables or disables the bootable attribute. You
can boot an instance from a bootable volume.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
bootable_required:
description: |
Enables or disables the bootable attribute. You
can boot an instance from a bootable volume.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: true
type: boolean
bootable_response:
description: |
Enables or disables the bootable attribute. You
can boot an instance from a bootable volume.
in: body
required: true
type: string
capabilities:
description: |
The capabilities for the back end. The value is
either ``null`` or a string value that indicates the capabilities
for each pool. For example, ``total_capacity_gb`` or ``QoS_support``.
in: body
required: true
type: object
cgsnapshot:
description: |
A consistency group snapshot object.
in: body
required: true
type: object
cgsnapshot_id:
description: |
The UUID of the consistency group snapshot.
in: body
required: false
type: string
cgsnapshots:
description: |
A collection of ``cgsnapshot`` objects.
in: body
required: true
type: object
cinder_id:
description: |
The UUID of the resource in Cinder.
in: body
required: true
type: string
cipher:
description: |
The encryption algorithm or mode. For example, aes-xts-plain64. The default
value is None.
in: body
required: false
type: string
cluster:
description: |
A cluster object.
in: body
required: true
type: object
cluster_active_backend_id:
description: |
The ID of active storage backend. Only in ``cinder-volume`` service.
in: body
required: false
type: string
min_version: 3.26
cluster_binary:
description: |
The binary name of the services in the cluster.
in: body
required: false
type: string
cluster_binary_resp:
description: |
The binary name of the services in the cluster.
in: body
required: true
type: string
cluster_cvol:
description: |
The cluster name. Only in ``cinder-volume`` service.
in: body
required: false
type: string
min_version: 3.7
cluster_frozen:
description: |
Whether the cluster is frozen or not.
in: body
required: false
type: boolean
min_version: 3.26
cluster_last_heartbeat:
description: |
The last periodic heartbeat received.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
For example, ``2015-08-27T09:49:58-05:00``.
The ``±hh:mm`` value, if included, is the time zone as an offset
from UTC.
in: body
required: true
type: string
cluster_mutex:
description: |
The OpenStack Block Storage cluster where the resource resides. Optional
only if host field is provided.
in: body
required: false
type: string
cluster_name_required:
description: |
The name to identify the service cluster.
in: body
required: true
type: string
cluster_name_resp:
description: |
The name of the service cluster.
in: body
required: true
type: string
cluster_num_down_hosts:
description: |
The number of down hosts in the cluster.
in: body
required: true
type: integer
cluster_num_hosts:
description: |
The number of hosts in the cluster.
in: body
required: true
type: integer
cluster_replication_status:
description: |
The cluster replication status. Only included in responses if configured.
One of: ``enabled`` or ``disabled``.
in: body
required: false
type: string
cluster_state:
description: |
The state of the cluster. One of ``up`` or ``down``.
in: body
required: true
type: string
cluster_status:
description: |
The status of the cluster. One of ``enabled`` or ``disabled``.
in: body
required: true
type: string
clusters:
description: |
A list of cluster objects.
in: body
required: true
type: array
connection_info:
description: |
The connection info used for server to connect the volume.
in: body
required: true
type: object
connector:
description: |
The ``connector`` object.
in: body
required: false
type: object
connector_required:
description: |
The ``connector`` object. The internal structure of connector depends on
the volume driver implementation. For details about the required elements
in the structure, see the documentation for the volume driver.
in: body
required: true
type: object
consistencygroup:
description: |
A consistency group.
in: body
required: true
type: object
consistencygroup-from-src:
description: |
The consistency group from source object.
in: body
required: true
type: object
consistencygroup_id:
description: |
The UUID of the consistency group.
in: body
required: false
type: string
consistencygroup_id_required:
description: |
The UUID of the consistency group.
in: body
required: true
type: string
consistencygroups:
description: |
A list of consistency groups.
in: body
required: true
type: array
consumer:
description: |
The consumer type.
in: body
required: false
type: string
consumes_quota:
description: |
Whether this resource consumes quota or not. Resources that are not
counted toward quota usage are usually temporary internal resources
created to perform an operation.
in: body
required: false
type: boolean
min_version: 3.65
container:
description: |
The container name or null.
in: body
required: false
type: string
container_format:
description: |
Container format for the new image. Default is bare.
in: body
required: false
type: string
container_format_upload:
description: |
Container format for the new image. Default is bare. (Note: Volumes
of an encrypted volume type must use a bare container format.)
in: body
required: false
type: string
control_location:
description: |
Notional service where encryption is performed. Valid values are
"front-end" or "back-end". The default value is "front-end".
in: body
required: false
type: string
count:
description: |
The total count of requested resource before pagination is applied.
in: body
required: false
type: integer
min_version: 3.45
create-from-src:
description: |
The create from source action.
in: body
required: true
type: object
created_at:
description: |
The date and time when the resource was created.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
For example, ``2015-08-27T09:49:58-05:00``.
The ``±hh:mm`` value, if included, is the time zone as an offset
from UTC.
in: body
required: true
type: string
data_timestamp:
description: |
The time when the data on the volume was first saved. If it is
a backup from volume, it will be the same as ``created_at``
for a backup. If it is a backup from a snapshot, it will be the
same as ``created_at`` for the snapshot.
in: body
required: true
type: string
delete:
description: |
The delete action.
in: body
required: true
type: object
delete-volumes:
description: |
If set to ``true``, allows deletion of a
group as well as all volumes in the group.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
deleted:
description: |
The resource is deleted or not.
in: body
required: true
type: boolean
deleted_at:
description: |
The date and time when the resource was deleted.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
For example, ``2015-08-27T09:49:58-05:00``.
The ``±hh:mm`` value, if included, is the time zone as an offset
from UTC. In the previous example, the offset value is ``-05:00``.
If the ``deleted_at`` date and time stamp is not set, its value is
``null``.
in: body
required: true
type: string
dependents:
description: |
Show the dependencies of the cluster.
in: body
required: false
type: string
description:
description: |
The backup description or null.
in: body
required: false
type: string
description_cap:
description: |
The capabilities description.
in: body
required: true
type: string
description_cg:
description: |
The consistency group description.
in: body
required: true
type: string
description_cg_snapshot_false:
description: |
The consistency group snapshot description.
in: body
required: false
type: string
description_cg_snapshot_true:
description: |
The consistency group snapshot description.
in: body
required: true
type: string
description_consis:
description: |
The consistency group description.
in: body
required: false
type: string
description_extn:
description: |
The extension description.
in: body
required: true
type: string
description_group_false:
description: |
The group description.
in: body
required: false
type: string
description_group_snap:
description: |
The group snapshot description.
in: body
required: false
type: string
description_group_snap_req:
description: |
The group snapshot description.
in: body
required: true
type: string
description_group_true:
description: |
The group description.
in: body
required: true
type: string
description_group_type_optional:
description: |
The group type description.
in: body
required: false
type: string
description_group_type_required:
description: |
The group type description.
in: body
required: true
type: string
description_snap:
description: |
A description for the snapshot. Default is
``None``.
in: body
required: false
type: string
description_snap_req:
description: |
A description for the snapshot.
in: body
required: true
type: string
description_vol:
description: |
The volume description.
in: body
required: false
type: string
description_vol_req:
description: |
The volume description.
in: body
required: true
type: string
description_volume_type_optional:
description: |
The volume type description.
in: body
required: false
type: string
description_volume_type_required:
description: |
The volume type description.
in: body
required: true
type: string
desired_capacity:
description: |
Show and set the desired capacity of the cluster.
in: body
required: true
type: string
min_version: 3.7
destination_project_id:
description: |
Records the destination project_id after volume transfer.
in: body
required: false
type: string
min_version: 3.57
detached_at:
description: |
The time when attachment is detached.
in: body
required: true
type: string
disabled:
description: |
Filter by disabled status.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
disabled_reason_body:
description: |
The reason for disabling a resource.
in: body
required: false
type: string
disabled_reason_body_req:
description: |
The reason for disabling a resource.
in: body
required: true
type: string
disabled_required:
description: |
The service is disabled or not.
in: body
required: true
type: boolean
disk_format:
description: |
Disk format for the new image. Default is raw.
in: body
required: false
type: string
disk_format_upload:
description: |
Disk format for the new image. Default is raw. (Note: volumes of an
encrypted volume type can only be uploaded in raw format.)
in: body
required: false
type: string
display_name:
description: |
The name of volume backend capabilities.
in: body
required: true
type: string
domain:
description: |
Show the domain in use for the cluster.
in: body
required: true
type: string
driver_version:
description: |
The driver version.
in: body
required: true
type: string
encrypted:
description: |
If true, this volume is encrypted.
in: body
required: true
type: boolean
encryption:
description: |
The encryption information.
in: body
required: true
type: object
encryption_id_body:
description: |
The UUID of the encryption.
in: body
required: true
type: string
encryption_key_id:
description: |
The UUID of the encryption key. Only included for encrypted volumes.
in: body
required: false
type: string
min_version: 3.64
event_id:
description: |
The id of the event to this message, this id could
eventually be translated into ``user_message``.
in: body
required: true
type: string
extend_completion_error:
description: |
Used to indicate that the extend operation has failed outside of cinder.
in: body
required: false
type: boolean
extra_info:
description: |
More information about the resource.
in: body
required: true
type: string
extra_specs_volume_type_optional:
description: |
A key and value pair that contains additional
specifications that are associated with the volume type. Examples
include capabilities, capacity, compression, and so on, depending
on the storage driver in use.
in: body
required: false
type: object
extra_specs_volume_type_required:
description: |
A set of key and value pairs that contains the
specifications for a volume type.
in: body
required: true
type: object
fail_reason:
description: |
If the backup failed, the reason for the failure.
Otherwise, null.
in: body
required: true
type: string
force:
description: |
Indicates whether to backup, even if the volume
is attached. Default is ``false``.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
force_snapshot:
description: |
Indicates whether to snapshot, even if the volume
is attached. Default is ``false``.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
force_upload_vol:
description: |
Enables or disables upload of a volume that is
attached to an instance. Default=False.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
free_capacity:
description: |
The amount of free capacity for the back-end
volume, in GBs. A valid value is a string, such as ``unknown``, or
a number (integer or floating point).
in: body
required: true
type: string
frozen:
description: |
The host is frozen or not. Only in ``cinder-volume`` service.
in: body
required: false
type: boolean
gigabytes:
description: |
The size (GB) of volumes and snapshots that are allowed for each project.
in: body
required: true
type: integer
gigabytes_for_type:
description: |
The size (GB) of volumes and snapshots that are allowed for each project
and the specified volume type.
in: body
required: true
type: integer
gigabytes_for_type_usage:
description: |
The size (GB) usage information of volumes and snapshots for this project
and this volume type, including ``in_use``, ``limit`` and ``reserved``
attributes.
in: body
required: true
type: object
gigabytes_usage:
description: |
The size (GB) usage information of volumes and snapshots for this project,
including ``in_use``, ``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
group:
description: |
A group object.
in: body
required: true
type: object
group_id:
description: |
The ID of the group.
in: body
required: true
type: string
group_id_optional:
description: |
The ID of the group.
in: body
required: false
type: string
min_version: 3.13
group_name:
description: |
The group name.
in: body
required: true
type: string
group_replication_status:
description: |
The group replication status.
in: body
required: false
type: string
min_version: 3.38
group_snapshot:
description: |
The group snapshot.
in: body
required: true
type: object
group_snapshot_id:
description: |
The ID of the group snapshot.
in: body
required: false
type: string
group_snapshot_id_3_14:
description: |
The ID of the group snapshot.
in: body
required: true
type: string
min_version: 3.14
group_snapshot_id_req:
description: |
The ID of the group snapshot.
in: body
required: true
type: string
group_snapshots:
description: |
A collection of group snapshots.
in: body
required: true
type: array
group_specs:
description: |
A set of key and value pairs that contains the
specifications for a group type.
in: body
required: false
type: object
group_specs_req:
description: |
A set of key and value pairs that contains the
specifications for a group type.
in: body
required: true
type: object
group_type:
description: |
A ``group_type`` object.
in: body
required: true
type: object
group_type_id:
description: |
The group type ID.
in: body
required: true
type: string
group_types:
description: |
The list of group types.
in: body
required: true
type: array
groups:
description: |
A collections of groups.
in: body
required: true
type: array
groups_number:
description: |
The number of groups that are allowed for each project.
in: body
required: true
type: integer
groups_number_usage:
description: |
The group usage information for this project, including ``in_use``,
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
guaranteed_until:
description: |
The expire time of the message, this message could be
deleted after this time.
in: body
required: false
type: string
has_dependent_backups:
description: |
If this value is ``true``, there are other backups depending on
this backup.
in: body
required: false
type: boolean
host:
description: |
The OpenStack Block Storage host where the
existing volume resides.
in: body
required: true
type: object
host_mutex:
description: |
The OpenStack Block Storage host where the existing resource resides.
Optional only if cluster field is provided.
in: body
required: false
type: string
host_name:
description: |
The name of the attaching host.
in: body
required: false
type: string
host_name_backend:
description: |
The name of the host that hosts the storage backend,
may take the format of ``host@backend``.
in: body
required: true
type: string
host_name_body:
description: |
The name of the host.
in: body
required: false
type: string
host_name_body_req:
description: |
The name of the host.
in: body
required: true
type: string
host_service:
description: |
The name of the service which is running on the host.
in: body
required: true
type: string
host_service_status:
description: |
The status of the service. One of ``available`` or ``unavailable``.
in: body
required: true
type: string
hosts:
description: |
An OpenStack Block Storage host.
in: body
required: true
type: object
id:
description: |
The UUID of the object.
in: body
required: true
type: string
id_backup:
description: |
The UUID of the backup.
in: body
required: true
type: string
id_message:
description: |
The UUID for the message.
in: body
required: true
type: string
id_qos_spec:
description: |
The generated ID for the QoS specification.
in: body
required: true
type: string
id_snap:
description: |
The snapshot UUID.
in: body
required: true
type: string
id_vol:
description: |
The UUID of the volume.
in: body
required: true
type: string
image_id:
description: |
The UUID for the new image.
in: body
required: true
type: string
image_name:
description: |
The name for the new image.
in: body
required: true
type: string
imageRef:
description: |
The UUID of the image from which you want to
create the volume. Required to create a bootable volume.
**New in version 3.46**: Instead of directly consuming a zero-byte
image that has been created by the Compute service when an instance
snapshot was requested, the Block Storage service will use the
``snapshot_id`` contained in the ``block_device_mapping`` image
property to locate the volume snapshot, and will use that to create
the volume instead.
in: body
required: false
type: string
incremental:
description: |
The backup mode. A valid value is ``true`` for
incremental backup mode or ``false`` for full backup mode. Default
is ``false``. See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
instance_uuid:
description: |
The UUID of the attaching instance.
in: body
required: false
type: string
instance_uuid_req:
description: |
The UUID of the attaching instance.
in: body
required: true
type: string
is_incremental:
description: |
Indicates whether the backup mode is incremental.
If this value is ``true``, the backup mode is incremental. If this
value is ``false``, the backup mode is full.
in: body
required: false
type: boolean
is_public_group_type_optional:
description: |
Whether the group type is publicly visible.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
is_public_group_type_required:
description: |
Whether the group type is publicly visible.
in: body
required: true
type: boolean
is_public_volume_type_optional:
description: |
Whether the volume type is publicly visible.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
is_public_volume_type_required:
description: |
Whether the volume type is publicly visible.
in: body
required: true
type: boolean
is_up:
description: |
Filter by up/down status.
See :ref:`valid boolean values <valid-boolean-values>`
in: body
required: false
type: boolean
key:
description: |
The metadata key name for the metadata that you
want to remove.
in: body
required: true
type: string
key_size:
description: |
Size of encryption key, in bits. This is usually 256. The default value
is None.
in: body
required: false
type: integer
keys:
description: |
List of Keys.
in: body
required: true
type: array
last_heartbeat:
description: |
Find the operational latency between this server/cluster and the other members of the replica set.
in: body
required: false
type: string
levels:
description: |
The current log level that queried.
in: body
required: true
type: object
levels_set:
description: |
The log level to set, case insensitive, accepted values are ``INFO``,
``WARNING``, ``ERROR`` and ``DEBUG``.
in: body
required: true
type: string
limit_usage:
description: |
The limit data size. Visible only if you set the
``usage=true`` query parameter.
in: body
required: false
type: integer
limits:
description: |
A list of ``limit`` objects.
in: body
required: true
type: object
links:
description: |
Links for the volume transfer.
in: body
required: true
type: array
links_backup:
description: |
Links for the backup.
in: body
required: true
type: array
links_message:
description: |
Links for the message.
in: body
required: false
type: array
links_qos:
description: |
The QoS specification links.
in: body
required: true
type: array
links_res:
description: |
Links to the resources in question.
in: body
required: true
type: array
links_snap:
description: |
Links for the snapshot.
in: body
required: false
type: array
links_vol:
description: |
The volume links.
in: body
required: true
type: array
links_vol_optional:
description: |
The volume links.
in: body
required: false
type: array
location:
description: |
Full URL to a service or server.
format: uri
in: body
required: true
type: string
log_levels:
description: |
The list of log levels.
in: body
required: true
type: array
manageable-snapshots:
description: |
A list of manageable snapshots.
in: body
required: true
type: list
manageable-volumes:
description: |
A list of manageable volumes.
in: body
required: true
type: list
max_size:
description: |
The maximum total size for the cluster.
in: body
required: true
type: integer
maxTotalBackupGigabytes:
description: |
The maximum total amount of backups, in gibibytes
(GiB).
in: body
required: true
type: integer
maxTotalBackups:
description: |
The maximum number of backups.
in: body
required: true
type: integer
maxTotalGroups:
description: |
The maximum number of groups.
in: body
required: true
type: integer
maxTotalSnapshots:
description: |
The maximum number of snapshots.
in: body
required: true
type: integer
maxTotalSnapshotsOptional:
description: |
The maximum number of snapshots.
in: body
required: false
type: integer
maxTotalVolumeGigabytes:
description: |
The maximum total amount of volumes, in gibibytes
(GiB).
in: body
required: true
type: integer
maxTotalVolumeGigabytesOptional:
description: |
The maximum total amount of volumes, in gibibytes (GiB).
in: body
required: false
type: integer
maxTotalVolumes:
description: |
The maximum number of volumes.
in: body
required: true
type: integer
maxTotalVolumesOptional:
description: |
The maximum number of volumes.
in: body
required: false
type: integer
media_types:
description: |
The `media types
`_.
It is an array of a fixed dict.
.. note::
It is vestigial and provides no useful information.
It will be deprecated and removed in the future.
in: body
required: true
type: array
message_level:
description: |
The level of the message, possible value is
only 'ERROR' now.
in: body
required: true
type: string
meta:
description: |
The metadata key and value pair for the volume.
in: body
required: true
type: object
meta_snap:
description: |
The metadata key and value pair for the snapshot.
in: body
required: true
type: object
metadata:
description: |
One or more metadata key and value pairs for the
snapshot, if any.
in: body
required: true
type: object
metadata_backup:
description: |
The backup metadata key value pairs.
in: body
required: false
type: object
min_version: 3.43
metadata_image:
description: |
The image metadata to add to the volume as a set
of metadata key and value pairs.
in: body
required: true
type: object
metadata_snap:
description: |
One or more metadata key and value pairs for the
snapshot.
in: body
required: false
type: object
metadata_vol:
description: |
One or more metadata key and value pairs to be associated
with the new volume.
in: body
required: false
type: object
metadata_vol_assoc:
description: |
One or more metadata key and value pairs that are
associated with the volume.
in: body
required: false
type: object
metadata_vol_assoc_req:
description: |
One or more metadata key and value pairs that are
associated with the volume.
in: body
required: true
type: object
metadata_vol_obj:
description: |
A ``metadata`` object. Contains one or more
metadata key and value pairs that are associated with the volume.
in: body
required: true
type: object
migrate_cluster:
description: |
The target cluster for the volume migration. Cluster format is
``cluster@backend``. Starting with microversion 3.16, either ``cluster``
or ``host`` must be specified. If ``host`` is specified and is part of a
cluster, the cluster is used as the target for the migration.
in: body
required: false
type: string
min_version: 3.16
migrate_force_host_copy:
description: |
If false (the default), rely on the volume backend driver to perform
the migration, which might be optimized. If true, or the volume driver
fails to migrate the volume itself, a generic host-based migration is
performed.
in: body
required: false
type: boolean
migrate_host:
description: |
The target host for the volume migration. Host format is ``host@backend``.
Required before microversion 3.16.
in: body
required: false
type: string
migrate_lock_volume:
description: |
If true, migrating an ``available`` volume will change its status to
``maintenance`` preventing other operations from being performed on the
volume such as attach, detach, retype, etc.
in: body
required: false
type: boolean
migration_completion_error:
description: |
Used to indicate if an error has occurred elsewhere that requires clean up.
in: body
required: false
type: boolean
# NOTE(mriedem): We can update the migration_policy retype note about encrypted
# in-use volumes not being supported once
# https://bugzilla.redhat.com/show_bug.cgi?id=760547 is fixed.
migration_policy:
description: |
Specify if the volume should be migrated when it is re-typed.
Possible values are ``on-demand`` or ``never``. If not specified, the
default is ``never``.
.. note:: If the volume is attached to a server instance and will be
migrated, then by default policy only users with the administrative role
should attempt the retype operation. A retype which involves a migration
to a new host for an *in-use* encrypted volume is not supported.
in: body
required: false
type: string
migration_status:
description: |
The volume migration status. Admin only.
in: body
required: false
type: string
min_size:
description: |
The minimum total size for the cluster.
in: body
required: true
type: integer
mountpoint:
description: |
The attaching mount point.
in: body
required: true
type: string
multiattach:
description: |
Enable creating multiattach volumes
in: body
required: false
type: string
multiattach_resp:
description: |
If true, this volume can attach to more than one
instance.
in: body
required: true
type: boolean
name:
description: |
The name of the object.
in: body
required: false
type: string
name_backend_pool:
description: |
The name of the backend pool.
in: body
required: true
type: string
name_backup:
description: |
The backup name.
in: body
required: true
type: string
name_cgsnap:
description: |
The consistency group snapshot name.
in: body
required: true
type: string
name_consis:
description: |
The consistency group name.
in: body
required: false
type: string
name_group:
description: |
The group name.
in: body
required: false
type: string
name_group_snap:
description: |
The group snapshot name.
in: body
required: false
type: string
name_group_snap_req:
description: |
The group snapshot name.
in: body
required: true
type: string
name_group_type:
description: |
The group type name.
in: body
required: true
type: string
name_optional:
description: |
The name of the Volume Backup.
in: body
required: false
type: string
name_qos_spec:
description: |
The name of the QoS specification.
in: body
required: true
type: string
name_snap:
description: |
The name of the snapshot. Default is ``None``.
in: body
required: false
type: string
name_snap_req:
description: |
The name of the snapshot.
in: body
required: true
type: string
name_vol:
description: |
The volume name.
in: body
required: true
type: string
name_volume_type_optional:
description: |
The name of the volume type.
in: body
required: false
type: string
name_volume_type_required:
description: |
The name of the volume type.
in: body
required: true
type: string
namespace:
description: |
Link associated to the extension.
in: body
required: false
type: string
namespace_storage:
description: |
The storage namespace, such as
``OS::Storage::Capabilities::foo``.
in: body
required: true
type: string
new_size:
description: |
The new size of the volume, in gibibytes (GiB).
in: body
required: true
type: integer
new_type:
description: |
The new volume type that volume is changed with.
in: body
required: true
type: string
new_volume:
description: |
The UUID of the new volume.
in: body
required: true
type: string
no_snapshots:
description: |
Transfer volume without snapshots. Defaults to False if not specified.
in: body
required: false
min_version: 3.55
type: boolean
nodes:
description: |
A list of the UUIDs of node objects which are members of the current cluster.
in: body
required: false
type: string
num_down_hosts:
description: |
The number of hosts in the cluster that are down (inoperative).
in: body
required: false
type: integer
num_hosts:
description: |
The number of hosts in the cluster.
in: body
required: false
type: integer
object_count:
description: |
The number of objects in the backup.
in: body
required: true
type: integer
os-attach:
description: |
The ``os-attach`` action.
in: body
required: true
type: object
os-backup-project-attr:project_id:
description: |
The UUID of the owning project.
in: body
required: true
type: string
min_version: 3.18
os-begin_detaching:
description: |
The ``os-begin_detaching`` action.
in: body
required: true
type: object
os-detach:
description: |
The ``os-detach`` action.
in: body
required: true
type: object
os-ext-snap-attr:progress:
description: |
A percentage value for the build progress.
in: body
required: true
type: string
os-ext-snap-attr:project_id:
description: |
The UUID of the owning project.
in: body
required: true
type: string
os-extend:
description: |
The ``os-extend`` action.
in: body
required: true
type: object
os-extend_volume_completion:
description: |
The ``os-extend_volume_completion`` action.
in: body
required: true
type: object
os-force_delete:
description: |
The ``os-force_delete`` action.
in: body
required: true
type: string
os-force_detach:
description: |
The ``os-force_detach`` action.
in: body
required: true
type: object
os-initialize_connection:
description: |
The ``os-initialize_connection`` action.
in: body
required: true
type: object
os-migrate_volume:
description: |
The ``os-migrate_volume`` action.
in: body
required: true
type: object
os-migrate_volume_completion:
description: |
The ``os-migrate_volume_completion`` action.
in: body
required: true
type: object
os-reimage:
description: |
The ``os-reimage`` action.
in: body
required: true
type: object
min_version: 3.68
os-reserve:
description: |
The ``os-reserve`` action.
in: body
required: true
type: object
os-reset_status:
description: |
The ``os-reset_status`` action.
in: body
required: true
type: object
os-retype:
description: |
The ``os-retype`` action.
in: body
required: true
type: object
os-roll_detaching:
description: |
The ``os-roll_detaching`` action.
in: body
required: true
type: object
OS-SCH-HNT:scheduler_hints:
description: |
The dictionary of data to send to the scheduler.
in: body
required: false
type: object
os-set_bootable:
description: |
The ``os-set_bootable`` action.
in: body
required: true
type: object
os-set_image_metadata:
description: |
The ``os-set_image_metadata`` action.
in: body
required: true
type: object
os-show_image_metadata:
description: |
The ``os-show_image_metadata`` action.
in: body
required: true
type: object
os-terminate_connection:
description: |
The ``os-terminate_connection`` action.
in: body
required: true
type: object
os-unmanage:
description: |
The ``os-unmanage`` action. This action removes
the specified volume from Cinder management.
in: body
required: true
type: object
os-unreserve:
description: |
The ``os-unreserve`` action.
in: body
required: true
type: object
os-unset_image_metadata:
description: |
The ``os-unset_image_metadata`` action. This
action removes the key-value pairs from the image metadata.
in: body
required: true
type: object
os-update_readonly_flag:
description: |
The ``os-update_readonly_flag`` action. This action
enables or disables update of volume to read-only access mode.
in: body
required: true
type: object
os-update_snapshot_status:
description: |
The ``os-update_snapshot_status`` action.
in: body
required: true
type: object
os-vol-host-attr:host:
description: |
Current back-end of the volume.
Host format is ``host@backend#pool``.
in: body
required: false
type: string
os-vol-mig-status-attr:migstat:
description: |
The status of this volume migration (None means
that a migration is not currently in progress).
in: body
required: false
type: string
os-vol-mig-status-attr:name_id:
description: |
The volume ID that this volume name on the back-
end is based on.
in: body
required: false
type: string
os-vol-tenant-attr:tenant_id:
description: |
The project ID which the volume belongs to.
in: body
required: true
type: string
os-volume_upload_image:
description: |
The ``os-volume_upload_image`` action. This
action uploads the specified volume to image service.
in: body
required: true
type: object
per_volume_gigabytes:
description: |
The size (GB) of volumes that are allowed for each volume.
in: body
required: true
type: integer
per_volume_gigabytes_usage:
description: |
The size (GB) usage information for each volume, including ``in_use``,
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
policies:
description: |
A list of UUIDs of the policies attached to the current cluster.
in: body
required: false
type: string
pool_name:
description: |
The name of the storage pool.
in: body
required: true
type: string
pools:
description: |
List of storage pools.
in: body
required: true
type: array
prefix:
description: |
The prefix for the log path we are querying,
for example ``cinder.`` or ``sqlalchemy.engine``.
When not present or the empty string is passed all
log levels will be retrieved.
in: body
required: false
type: string
profile_id:
description: |
The UUID of the profile.
in: body
required: false
type: string
min_version: 3.7
profile_name:
description: |
The name of a profile object. The name must start
with an ASCII letter and can contain ASCII letters,
digits, underscores, periods, and hyphens and its
length must be less than 255
in: body
required: true
type: string
project:
description: |
The ID of the project. Volume Type access to be
added to this project ID.
in: body
required: true
type: string
project_id:
description: |
The UUID of the project.
in: body
required: true
type: string
project_id_group:
description: |
The UUID of the volume group project.
in: body
required: false
type: string
min_version: 3.58
project_id_group_snapshot:
description: |
The UUID of the volume group snapshot project.
in: body
required: false
type: string
min_version: 3.58
project_id_host:
description: |
The Project ID which the host resource belongs to.
In the summary resource, the value is ``(total)``.
in: body
required: true
type: string
properties:
description: |
The backend volume capabilities list, which is
consisted of cinder standard capabilities and vendor unique
properties.
in: body
required: true
type: object
protected:
description: |
Whether the new image is protected. Default=False.
See :ref:`valid boolean values `
in: body
required: false
type: boolean
min_version: 3.1
provider: # required response parameter (get/create)
description: |
The class that provides encryption support.
in: body
required: true
type: string
provider_id:
description: |
The provider ID for the volume. The value is either a string set by the
driver or ``null`` if the driver doesn't use the field or if it hasn't
created it yet. Only returned for administrators.
in: body
required: false
type: string
min_version: 3.21
provider_optional: # optional response parameter (update)
description: |
The class that provides encryption support.
in: body
required: false
type: string
provider_req: # required request parameter (create)
description: |
The class that provides encryption support. Choices are:
* luks - relies on Linux Unified Key Setup (recommended)
* plain - relies on dm-crypt
in: body
required: true
type: string
provider_req_optional: # optional request parameter (update)
description: |
The class that provides encryption support. Choices are:
* luks - relies on Linux Unified Key Setup (recommended)
* plain - relies on dm-crypt
in: body
required: false
type: string
qos_association_id:
description: |
The Qos association ID.
in: body
required: true
type: string
qos_association_name:
description: |
The QoS association name.
in: body
required: true
type: string
qos_association_type:
description: |
The QoS association type.
in: body
required: true
type: string
qos_associations:
description: |
A collection of ``QoS associations``.
in: body
required: true
type: array
qos_set_id:
description: |
The QoS set ID.
in: body
required: true
type: string
qos_specs:
description: |
A ``qos_specs`` object.
in: body
required: true
type: object
qos_specs_id:
description: |
The QoS specifications ID.
in: body
required: false
type: string
QoS_support:
description: |
The quality of service (QoS) support.
in: body
required: true
type: boolean
quota_class_id:
description: |
The name of the quota class set.
in: body
required: true
type: string
quota_class_set:
description: |
A ``quota_class_set`` object.
in: body
required: true
type: object
quota_set:
description: |
A ``quota_set`` object.
in: body
required: true
type: object
rate:
description: |
Rate-limit volume copy bandwidth, used to
mitigate slow down of data access from the instances.
in: body
required: true
type: array
readonly:
description: |
Enables or disables read-only access mode.
This value can be True, true, False, false.
in: body
required: true
type: boolean
reason_not_safe:
description: |
The reason why the resource can't be managed.
in: body
required: true
type: string
ref:
description: |
A reference to the existing volume. The internal
structure of this reference depends on the volume driver
implementation. For details about the required elements in the
structure, see the documentation for the volume driver.
in: body
required: true
type: object
reference:
description: |
Some information for the resource.
in: body
required: true
type: object
reimage_reserved:
description: |
Normally, volumes to be re-imaged are in ``available`` or ``error`` status.
When ``true``, this parameter will allow a volume in the ``reserved`` status
to be re-imaged. The ability to re-image a volume in ``reserved`` status
may be restricted to administrators in some clouds. Default value is ``false``.
in: body
required: false
type: boolean
remove_project_access:
description: |
Removes volume type access from a project.
in: body
required: true
type: object
remove_volumes:
description: |
One or more volume UUIDs, separated by commas, to
remove from the volume group or consistency group.
in: body
required: false
type: string
replication_status:
description: |
The volume replication status.
in: body
required: true
type: string
replication_status_cvol:
description: |
The volume service replication status. Only in ``cinder-volume`` service.
in: body
required: false
type: string
replication_targets:
description: |
A list of volume backends used to replicate volumes
on this backend.
in: body
required: true
type: array
replication_targets_unique_key:
description: |
Vendor specific key-values. Only returned if administrator.
in: body
required: false
type: string
request_id:
description: |
The id of the request during which the message was created.
in: body
required: true
type: string
reserved_percentage:
description: |
The percentage of the total capacity that is
reserved for the internal use by the back end.
in: body
required: true
type: integer
reset_status:
description: |
The ``reset_status`` action.
in: body
required: true
type: object
resource_fil:
description: |
Resource which the filters will be applied to.
in: body
required: true
type: string
resource_filters:
description: |
The resource filter array.
in: body
required: true
type: array
resource_filters_coll:
description: |
A collection of resource filters.
in: body
required: true
type: array
resource_id:
description: |
The UUID of a resource to cleanup.
in: body
required: false
type: string
resource_type:
description: |
The resource type corresponding to ``resource_uuid``.
in: body
required: false
type: string
resource_uuid:
description: |
The UUID of the resource during whose operation the
message was created.
in: body
required: false
type: string
restore:
description: |
A ``restore`` object.
in: body
required: true
type: object
revert:
description: |
The ``revert`` action.
in: body
required: true
type: object
safe_to_manage:
description: |
If the resource can be managed or not.
in: body
required: true
type: boolean
security_group_rules:
description: |
The number of rules that are allowed for each
security group.
in: body
required: false
type: integer
security_groups:
description: |
The number of security groups that are allowed
for each project.
in: body
required: true
type: integer
service_id:
description: |
UUID for the cleanup service.
in: body
required: false
type: integer
service_key:
description: |
The service name. Deprecated. Keeping service key
for API compatibility.
in: body
required: true
type: string
service_state:
description: |
The state of the service. One of ``enabled`` or ``disabled``.
in: body
required: true
type: string
service_state_up_down:
description: |
The state of the service. One of ``up`` or ``down``.
in: body
required: true
type: string
service_status:
description: |
The status of the service. One of ``enabled`` or ``disabled``.
in: body
required: true
type: string
service_uuid:
description: |
A unique identifier that's used to indicate what node the volume-service
for a particular volume is being serviced by.
in: body
required: true
type: string
min_version: 3.48
services:
description: |
A list of service objects.
in: body
required: true
type: array
shared_targets:
description: |
An indicator whether the back-end hosting the volume utilizes
shared_targets or not. Default=True.
in: body
required: true
type: boolean
min_version: 3.48
max_version: 3.68
shared_targets_tristate:
description: |
An indicator whether the host connecting the volume should lock for the
whole attach/detach process or not. ``true`` means to lock only if the
iSCSI initiator running on the host doesn't support manual scans,
``false`` means never use locks, and ``null`` means to always use locks.
Look at os-brick's ``guard_connection`` context manager. Default=True.
in: body
required: true
type: boolean
min_version: 3.69
size:
description: |
The size of the volume, in gibibytes (GiB).
in: body
required: true
type: integer
snapshot:
description: |
A partial representation of a snapshot used in
the creation process.
in: body
required: true
type: string
snapshot_id:
description: |
To create a volume from an existing snapshot,
specify the UUID of the volume snapshot. The volume is created in
same availability zone and with same size as the snapshot.
in: body
required: false
type: string
snapshot_id_backup:
description: |
The UUID of the source snapshot that you want to back up.
in: body
required: false
type: string
snapshot_id_revert:
description: |
The UUID of the snapshot. The API
reverts the volume with this snapshot.
in: body
required: true
type: string
snapshot_id_source_vol:
description: |
The UUID of the source volume snapshot.
in: body
required: false
type: string
snapshot_name:
description: |
The name of the snapshot.
in: body
required: false
type: string
snapshot_obj:
description: |
A ``snapshot`` object.
in: body
required: true
type: object
snapshot_progress:
description: |
A percentage value for snapshot build progress.
in: body
required: false
type: string
snapshots_number:
description: |
The number of snapshots that are allowed for each project.
in: body
required: true
type: integer
snapshots_number_for_type:
description: |
The number of snapshots that are allowed for each project and
the specified volume type.
in: body
required: true
type: integer
snapshots_number_for_type_usage:
description: |
The snapshot usage information for this project and this volume type,
including ``in_use``, ``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
snapshots_number_usage:
description: |
The snapshot usage information for this project, including ``in_use``,
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
source-name:
description: |
The resource's name.
in: body
required: true
type: string
source_cgid:
description: |
The UUID of the source consistency group.
in: body
required: false
type: string
source_group_id:
description: |
The UUID of the source group.
in: body
required: false
type: string
source_group_id_req:
description: |
The UUID of the source group.
in: body
required: true
type: string
source_project_id:
description: |
Records the source project_id before volume transfer.
in: body
required: false
type: string
min_version: 3.57
source_reference:
description: |
The snapshot's origin volume information.
in: body
required: true
type: object
source_volid:
description: |
The UUID of the source volume. The API creates a new volume with the same
size as the source volume unless a larger size is requested.
in: body
required: false
type: string
spec_value:
description: |
The value of the group specification corresponding to the specified key.
in: body
required: true
type: string
specs:
description: |
A ``specs`` object.
in: body
required: true
type: object
state:
description: |
The ``state`` of the cluster. One of ``up`` or ``down``.
in: body
required: true
type: string
status:
description: |
The ``status`` of the consistency group snapshot.
in: body
required: false
type: string
status_attachment:
description: |
The status of the attachment.
in: body
required: true
type: string
status_backup:
description: |
The backup status. Refer to Backup statuses table
for the possible status value.
in: body
required: true
type: string
status_backup_action:
description: |
The status for the backup.
in: body
required: true
type: string
status_consis:
description: |
The status of the consistency group.
in: body
required: true
type: string
status_group:
description: |
The status of the generic group.
in: body
required: true
type: string
status_group_snap:
description: |
The ``status`` of the generic group snapshot.
in: body
required: true
type: string
status_reason:
description: |
The string representation of the reason why the object
has transited to its current status.
in: body
required: false
type: string
status_snap:
description: |
The status for the snapshot.
in: body
required: true
type: string
status_vol:
description: |
The volume status.
in: body
required: true
type: string
storage_protocol:
description: |
The storage back end for the back-end volume. For
example, ``iSCSI`` or ``FC``.
in: body
required: true
type: string
summary_metadata:
description: |
The dictionary of lists contains all the volumes' metadata,
classified by metadata key.
in: body
required: true
type: object
min_version: 3.36
timeout:
description: |
The default timeout value (in seconds)
of cluster operations.
in: body
required: false
type: integer
min_version: 3.7
total_capacity:
description: |
The total capacity for the back-end volume, in
GBs. A valid value is a string, such as ``unknown``, or a
number (integer or floating point).
in: body
required: true
type: string
total_count:
description: |
Total number of volumes.
in: body
required: true
type: string
total_count_int:
description: |
Total number of volumes.
in: body
required: true
type: integer
total_size:
description: |
Total size of volumes in GB.
in: body
required: true
type: integer
totalBackupGigabytesUsed:
description: |
The total number of backups gibibytes (GiB) used.
in: body
required: true
type: integer
totalBackupsUsed:
description: |
The total number of backups used.
in: body
required: true
type: integer
totalGigabytesUsed:
description: |
The total number of gibibytes (GiB) used.
in: body
required: true
type: integer
totalGigabytesUsedStr:
description: |
The total number of gibibytes (GiB) used.
in: body
required: true
type: string
totalSnapGigabytesUsed:
description: |
The total number of gibibytes (GiB) used by snapshots.
in: body
required: true
type: string
totalSnapshotsUsed:
description: |
The total number of snapshots used.
in: body
required: true
type: string
totalSnapshotsUsed_int:
description: |
The total number of snapshots used.
in: body
required: true
type: integer
totalVolumesUsed:
description: |
The total number of volumes used.
in: body
required: true
type: integer
transfer:
description: |
The volume transfer object.
in: body
required: true
type: object
transfer_name:
description: |
The name of the volume transfer.
in: body
required: true
type: string
transfer_obj_id:
description: |
The UUID of the volume transfer.
in: body
required: true
type: string
transfers:
description: |
List of transfer details.
in: body
required: true
type: array
updated:
description: |
The date and time stamp when the extension was
last updated.
in: body
required: true
type: string
updated_at:
description: |
The date and time when the resource was updated.
The date and time stamp format is `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_:
::
CCYY-MM-DDThh:mm:ss±hh:mm
For example, ``2015-08-27T09:49:58-05:00``.
The ``±hh:mm`` value, if included, is the time zone as an offset
from UTC. In the previous example, the offset value is ``-05:00``.
If the ``updated_at`` date and time stamp is not set, its value is
``null``.
in: body
required: true
type: string
user_id:
description: |
The UUID of the user.
in: body
required: true
type: string
user_id_backup:
description: |
The UUID of the project owner.
in: body
required: true
type: string
min_version: 3.56
user_id_min:
description: |
The UUID of the user.
in: body
required: true
type: string
min_version: 3.41
user_message:
description: |
The translated readable message corresponding to ``event_id``.
in: body
required: true
type: string
user_messages:
description: |
A collection of user messages.
in: body
required: true
type: string
vendor_name:
description: |
The name of the vendor.
in: body
required: true
type: string
version_id:
in: body
required: true
description: |
A common name for the version in question. Informative only, it
has no real semantic meaning.
type: string
version_max:
in: body
required: true
description: |
If this version of the API supports microversions, the maximum
microversion that is supported. This will be the empty string if
microversions are not supported.
type: string
version_min:
in: body
required: true
description: |
If this version of the API supports microversions, the minimum
microversion that is supported. This will be the empty string if
microversions are not supported.
type: string
version_status:
in: body
required: true
description: |
The status of this API version. This can be one of:
- ``CURRENT``: this is the preferred version of the API to use
- ``DEPRECATED``: a deprecated version of the API that is slated for removal
type: string
version_updated:
description: |
This is a fixed string that API version updates.
in: body
required: true
type: string
visibility:
description: |
The volume type access.
in: body
required: true
type: string
visibility_min:
description: |
The visibility property of the new image. Default is private.
in: body
required: false
type: string
min_version: 3.1
volume:
description: |
A ``volume`` object.
in: body
required: true
type: object
volume-summary:
description: |
Dictionary of ``volume-summary`` objects.
in: body
required: true
type: object
volume_backend_name:
description: |
The name of the back-end volume.
in: body
required: true
type: string
volume_cluster_name:
description: |
The cluster name of volume backend.
in: body
required: false
type: string
min_version: 3.61
volume_id:
description: |
The UUID of the volume.
in: body
required: true
type: string
volume_id_attachment:
description: |
The UUID of the volume which the attachment belongs
to.
in: body
required: true
type: string
volume_id_backup:
description: |
The UUID of the volume that you want to back up.
in: body
required: true
type: string
volume_id_restore:
description: |
The UUID of the volume to which you want to
restore a backup.
in: body
required: false
type: string
volume_id_snap:
description: |
If the snapshot was created from a volume, the
volume ID.
in: body
required: true
type: string
volume_ids:
description: |
A list of ``volume`` ids, available only when ``list_volume`` is set to ``true``.
in: body
required: false
type: array
min_version: 3.25
volume_image_metadata:
description: |
List of image metadata entries. Only included for volumes that were
created from an image, or from a snapshot of a volume originally created
from an image.
in: body
required: false
type: object
volume_name:
description: |
The volume name.
in: body
required: true
type: string
volume_name_optional:
description: |
The volume name.
in: body
required: false
type: string
volume_type:
description: |
A ``volume_type`` object.
in: body
required: true
type: object
volume_type_access:
description: |
List of objects containing volume type to be accessed by project.
in: body
required: true
type: array
volume_type_detail:
description: |
The volume type (either name or ID). To create an environment with
multiple-storage back ends, you must specify a volume type. Block
Storage volume back ends are spawned as children to ``cinder-volume``,
and they are keyed from a unique queue. They are named
``cinder-volume.HOST.BACKEND``. For example,
``cinder-volume.ubuntu.lvmdriver``. When a volume is created, the
scheduler chooses an appropriate back end to handle the request based
on the volume type. Default is ``None``. For information about how to
use volume types to create multiple-storage back ends, see
`Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
in: body
required: false
type: string
volume_type_id_363:
description: |
The associated volume type ID for the volume.
in: body
required: true
type: object
min_version: 3.63
volume_type_id_body:
description: |
The UUID of the volume type.
in: body
required: true
type: string
volume_type_vol:
description: |
The associated volume type name for the volume.
in: body
required: true
type: string
volume_types:
description: |
The list of volume types. In an environment with
multiple-storage back ends, the scheduler determines where to send
the volume based on the volume type. For information about how to
use volume types to create multiple-storage back ends, see
`Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
in: body
required: true
type: array
volume_types_commas:
description: |
The list of volume types separated by commas. In an environment with
multiple-storage back ends, the scheduler determines where to send
the volume based on the volume type. For information about how to
use volume types to create multiple-storage back ends, see
`Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
in: body
required: true
type: string
volumes:
description: |
A list of ``volume`` objects.
in: body
required: true
type: array
volumes_number:
description: |
The number of volumes that are allowed for each project.
in: body
required: true
type: integer
volumes_number_for_type:
description: |
The number of volumes that are allowed for each project and
the specified volume type.
in: body
required: true
type: integer
volumes_number_for_type_usage:
description: |
The volume usage information for this project and this volume type,
including ``in_use``, ``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
volumes_number_usage:
description: |
The volume usage information for this project, including ``in_use``,
``limit`` and ``reserved`` attributes.
in: body
required: true
type: object
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/qos-specs-v3-qos-specs.inc 0000664 0000000 0000000 00000014520 15131732575 0026416 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Quality of service (QoS) specifications (qos-specs)
===================================================
Administrators only, depending on policy settings.
Creates, lists, shows details for, associates, disassociates, sets
keys, unsets keys, and deletes quality of service (QoS)
specifications.
Disassociate a QoS specification from all associations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/disassociate_all
Disassociates a QoS specification from all associations.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
Unset keys in a QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/qos-specs/{qos_id}/delete_keys
Unsets keys in a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- keys: keys
Request Example
---------------
.. literalinclude:: ./samples/qos/qos-unset-request.json
:language: javascript
Get all associations for a QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/associations
Lists all associations for a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
Response
--------
.. rest_parameters:: parameters.yaml
- qos_associations: qos_associations
- type: qos_association_type
- id: qos_association_id
- name: qos_association_name
Response Example
----------------
.. literalinclude:: ./samples/qos/qos_show_response.json
:language: javascript
Associate QoS specification with a volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/associate
Associates a QoS specification with a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- vol_type_id: vol_type_id_query
Disassociate QoS specification from a volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/disassociate
Disassociates a QoS specification from a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- vol_type_id: vol_type_id_query
Show a QoS specification details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}
Shows details for a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 401
- 403
- 404
- 405
- 413
- 503
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- qos_specs: qos_specs
- specs: specs
- consumer: consumer
- name: name_qos_spec
- id: id_qos_spec
- links: links_qos
Response Example
----------------
.. literalinclude:: ./samples/qos/qos-show-response.json
:language: javascript
Set keys in a QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/qos-specs/{qos_id}
Sets keys in a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- qos_specs: qos_specs
Request Example
---------------
.. literalinclude:: ./samples/qos/qos-update-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- qos_specs: qos_specs
Response Example
----------------
.. literalinclude:: ./samples/qos/qos-update-response.json
:language: javascript
Delete a QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/qos-specs/{qos_id}
Deletes a QoS specification.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_id: qos_id
- force: force_del_qos
Create a QoS specification
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/qos-specs
Creates a QoS specification.
Specify one or more key and value pairs in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- qos_specs: qos_specs
- name: name_qos_spec
Request Example
---------------
.. literalinclude:: ./samples/qos/qos-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- qos_specs: qos_specs
- name: name_qos_spec
- links: links_qos
- id: id_qos_spec
- consumer: consumer
- specs: specs
Response Example
----------------
.. literalinclude:: ./samples/qos/qos-create-response.json
:language: javascript
List QoS Specifications
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/qos-specs
Lists quality of service (QoS) specifications.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 300
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- qos_specs: qos_specs
- specs: specs
- consumer: consumer
- id: id_qos_spec
- name: name_qos_spec
Response Example
----------------
.. literalinclude:: ./samples/qos/qos-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/quota-classes.inc 0000664 0000000 0000000 00000006121 15131732575 0025022 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Quota class set extension (os-quota-class-sets)
===============================================
Administrators only, depending on policy settings.
Shows and updates quota classes for a project.
Show quota classes for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v3/{admin_project_id}/os-quota-class-sets/{quota_class_name}
Shows quota class set for a project. If no specific value for the quota class
resource exists, then the default value will be reported.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- quota_class_name: quota_class_name
- admin_project_id: admin_project_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_class_set: quota_class_set
- backup_gigabytes: maxTotalBackupGigabytes
- backups: maxTotalBackups
- gigabytes: maxTotalVolumeGigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- groups: maxTotalGroups
- per_volume_gigabytes: per_volume_gigabytes
- snapshots: maxTotalSnapshots
- snapshots_{volume_type}: snapshots_number_for_type
- volumes: maxTotalVolumes
- volumes_{volume_type}: volumes_number_for_type
- id: quota_class_id
Response Example
----------------
.. literalinclude:: ./samples/quota_classes/quota-classes-show-response.json
:language: javascript
Update quota classes for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
PUT /v3/{admin_project_id}/os-quota-class-sets/{quota_class_name}
Updates quota class set for a project. If the ``quota_class_name`` key does not
exist, then the API will create one.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- quota_class_name: quota_class_name
- gigabytes: maxTotalVolumeGigabytesOptional
- gigabytes_{volume_type}: gigabytes_for_type
- snapshots: maxTotalSnapshotsOptional
- snapshots_{volume_type}: snapshots_number_for_type
- volumes: maxTotalVolumesOptional
- volumes_{volume_type}: volumes_number_for_type
Request Example
---------------
.. literalinclude:: ./samples/quota_classes/quota-classes-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_class_set: quota_class_set
- backup_gigabytes: maxTotalBackupGigabytes
- backups: maxTotalBackups
- gigabytes: maxTotalVolumeGigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- groups: maxTotalGroups
- per_volume_gigabytes: per_volume_gigabytes
- snapshots: maxTotalSnapshots
- snapshots_{volume_type}: snapshots_number_for_type
- volumes: maxTotalVolumes
- volumes_{volume_type}: volumes_number_for_type
Response Example
----------------
.. literalinclude:: ./samples/quota_classes/quota-classes-update-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/quota-sets.inc 0000664 0000000 0000000 00000012425 15131732575 0024347 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Quota sets extension (os-quota-sets)
====================================
Administrators only, depending on policy settings.
Shows, updates, and deletes quotas for a project.
Show quotas for a project
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{admin_project_id}/os-quota-sets/{project_id}
Shows quotas for a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- project_id: quotas_project_id
- usage: usage
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_set: quota_set
- id: project_id
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Response Example
----------------
.. literalinclude:: ./samples/quota_sets/quotas-show-response.json
:language: javascript
Show quota usage for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v3/{admin_project_id}/os-quota-sets/{project_id}?{usage}=True
Shows quota usage for a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: quotas_project_id
- admin_project_id: admin_project_id
- usage: usage
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_set: quota_set
- id: project_id
- volumes: volumes_number_usage
- volumes_{volume_type}: volumes_number_for_type_usage
- snapshots: snapshots_number_usage
- snapshots_{volume_type}: snapshots_number_for_type_usage
- backups: backups_number_usage
- groups: groups_number_usage
- per_volume_gigabytes: per_volume_gigabytes_usage
- gigabytes: gigabytes_usage
- gigabytes_{volume_type}: gigabytes_for_type_usage
- backup_gigabytes: backup_gigabytes_usage
Response Example
----------------
.. literalinclude:: ./samples/quota_sets/quotas-show-usage-response.json
:language: javascript
Update quotas for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{admin_project_id}/os-quota-sets/{project_id}
Updates quotas for a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- admin_project_id: admin_project_id
- project_id: quotas_project_id
- quota_set: quota_set
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Request Example
---------------
.. literalinclude:: ./samples/quota_sets/quotas-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_set: quota_set
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Response Example
----------------
.. literalinclude:: ./samples/quota_sets/quotas-update-response.json
:language: javascript
Delete quotas for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{admin_project_id}/os-quota-sets/{project_id}
Deletes quotas for a project so the quotas revert to default values.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: quotas_project_id
- admin_project_id: admin_project_id
Get default quotas for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v3/{admin_project_id}/os-quota-sets/{project_id}/defaults
Gets default quotas for a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- admin_project_id: admin_project_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- quota_set: quota_set
- id: project_id
- volumes: volumes_number
- volumes_{volume_type}: volumes_number_for_type
- snapshots: snapshots_number
- snapshots_{volume_type}: snapshots_number_for_type
- backups: backups_number
- groups: groups_number
- per_volume_gigabytes: per_volume_gigabytes
- gigabytes: gigabytes
- gigabytes_{volume_type}: gigabytes_for_type
- backup_gigabytes: backup_gigabytes
Response Example
----------------
.. literalinclude:: ./samples/quota_sets/quotas-show-defaults-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/resource-filters.inc 0000664 0000000 0000000 00000001445 15131732575 0025537 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
.. _resource-filters:
Resource Filters (resource_filters)
===================================
Lists all resource filters, available since
microversion 3.33.
List resource filters
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/resource_filters
List filters.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- resource: resource
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- resource_filters: resource_filters_coll
- filters: resource_filters
- resource: resource_fil
Response Example
----------------
.. literalinclude:: ./samples/resource-filters-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/ 0000775 0000000 0000000 00000000000 15131732575 0023207 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/attachment-complete.json 0000664 0000000 0000000 00000000032 15131732575 0030033 0 ustar 00root root 0000000 0000000 {
"os-complete": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/attachment-create-request.json 0000664 0000000 0000000 00000000750 15131732575 0031163 0 ustar 00root root 0000000 0000000 {
"attachment": {
"instance_uuid": "462dcc2d-130d-4654-8db1-da0df2da6a0d",
"connector": {
"initiator": "iqn.1993-08.org.debian: 01: cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "ro"
},
"volume_uuid": "462dcc2d-130d-4654-8db1-da0df2da6a0d"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/attachment-create-response.json 0000664 0000000 0000000 00000000620 15131732575 0031325 0 ustar 00root root 0000000 0000000 {
"attachment": {
"status": "attaching",
"detached_at": "2015-09-16T09:28:52.000000",
"connection_info": {},
"attached_at": "2015-09-16T09:28:52.000000",
"attach_mode": "ro",
"instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345",
"volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d",
"id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c"
}
} attachment-list-detailed-response.json 0000664 0000000 0000000 00000000705 15131732575 0032533 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"attachments": [
{
"status": "attaching",
"detached_at": "2015-09-16T09:28:52.000000",
"connection_info": {},
"attached_at": "2015-09-16T09:28:52.000000",
"attach_mode": "ro",
"instance": "31c79baf-b59e-469c-979f-1df4ecb6eea7",
"volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d",
"id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/attachment-list-response.json 0000664 0000000 0000000 00000000420 15131732575 0031033 0 ustar 00root root 0000000 0000000 {
"attachments": [
{
"status": "attaching",
"instance": "31c79baf-b59e-469c-979f-1df4ecb6eea7",
"id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c",
"volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/attachment-show-response.json 0000664 0000000 0000000 00000000620 15131732575 0031042 0 ustar 00root root 0000000 0000000 {
"attachment": {
"status": "attaching",
"detached_at": "2015-09-16T09:28:52.000000",
"connection_info": {},
"attached_at": "2015-09-16T09:28:52.000000",
"attach_mode": "ro",
"instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345",
"volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d",
"id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/attachment-update-request.json 0000664 0000000 0000000 00000000550 15131732575 0031200 0 ustar 00root root 0000000 0000000 {
"attachment": {
"connector": {
"initiator": "iqn.1993-08.org.debian: 01: cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "ro"
}
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/attachment-update-response.json 0000664 0000000 0000000 00000000620 15131732575 0031344 0 ustar 00root root 0000000 0000000 {
"attachment": {
"status": "attaching",
"detached_at": "2015-09-16T09:28:52.000000",
"connection_info": {},
"attached_at": "2015-09-16T09:28:52.000000",
"attach_mode": "ro",
"instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345",
"volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d",
"id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c"
}
} availability-zone-list-response.json 0000664 0000000 0000000 00000000200 15131732575 0032243 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"availabilityZoneInfo": [{
"zoneState": {
"available": true
},
"zoneName": "nova"
}]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backend-capabilities-response.json0000664 0000000 0000000 00000002042 15131732575 0031752 0 ustar 00root root 0000000 0000000 {
"namespace": "OS::Storage::Capabilities::fake",
"vendor_name": "OpenStack",
"volume_backend_name": "lvmdriver-1",
"pool_name": "pool",
"driver_version": "2.0.0",
"storage_protocol": "iSCSI",
"display_name": "Capabilities of Cinder LVM driver",
"description": "These are volume type options provided by Cinder LVM driver, blah, blah.",
"visibility": "public",
"replication_targets": [],
"properties": {
"compression": {
"title": "Compression",
"description": "Enables compression.",
"type": "boolean"
},
"qos": {
"title": "QoS",
"description": "Enables QoS.",
"type": "boolean"
},
"replication": {
"title": "Replication",
"description": "Enables replication.",
"type": "boolean"
},
"thin_provisioning": {
"title": "Thin Provisioning",
"description": "Sets thin provisioning.",
"type": "boolean"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backup-force-delete-request.json 0000664 0000000 0000000 00000000036 15131732575 0031370 0 ustar 00root root 0000000 0000000 {
"os-force_delete": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backup-record-export-response.json0000664 0000000 0000000 00000000173 15131732575 0031777 0 ustar 00root root 0000000 0000000 {
"backup-record": {
"backup_service": "cinder.backup.drivers.swift",
"backup_url": "eyJzdGF0"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backup-record-import-request.json 0000664 0000000 0000000 00000000173 15131732575 0031622 0 ustar 00root root 0000000 0000000 {
"backup-record": {
"backup_service": "cinder.backup.drivers.swift",
"backup_url": "eyJzdGF0"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backup-record-import-response.json0000664 0000000 0000000 00000000775 15131732575 0032000 0 ustar 00root root 0000000 0000000 {
"backup": {
"id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"links": [
{
"href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"rel": "self"
},
{
"href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e",
"rel": "bookmark"
}
],
"name": null
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backup-reset-status-request.json 0000664 0000000 0000000 00000000101 15131732575 0031466 0 ustar 00root root 0000000 0000000 {
"os-reset_status": {
"status": "available"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backup-restore-request.json 0000664 0000000 0000000 00000000161 15131732575 0030514 0 ustar 00root root 0000000 0000000 {
"restore": {
"name": "vol-01",
"volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backup-restore-response.json 0000664 0000000 0000000 00000000267 15131732575 0030671 0 ustar 00root root 0000000 0000000 {
"restore": {
"backup_id": "2ef47aee-8844-490c-804d-2a8efe561c65",
"volume_id": "795114e8-7489-40be-a978-83797f2c1dd3",
"volume_name": "volume01"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups-list-response.json 0000664 0000000 0000000 00000001041 15131732575 0030333 0 ustar 00root root 0000000 0000000 {
"backups": [
{
"id": "5e7a312e-af39-4fc0-8633-b8c2cdabb67d",
"links": [{
"href": "https://158.69.65.111/volume/v3/ca730406ba3c40b0870e0bd431271736/backups/5e7a312e-af39-4fc0-8633-b8c2cdabb67d",
"rel": "self"
}, {
"href": "https://158.69.65.111/volume/ca730406ba3c40b0870e0bd431271736/backups/5e7a312e-af39-4fc0-8633-b8c2cdabb67d",
"rel": "bookmark"
}],
"name": "tempest-VolumesBackupsAdminTest-Backup-1385312480"
}
],
"count": 1
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/ 0000775 0000000 0000000 00000000000 15131732575 0024637 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/backup-create-request.json0000664 0000000 0000000 00000000406 15131732575 0031726 0 ustar 00root root 0000000 0000000 {
"backup": {
"container": null,
"description": "Test backup",
"name": "backup001",
"volume_id": "0aa67a0c-7339-4be6-b5d5-2afe21ca270c",
"incremental": false,
"snapshot_id": null,
"force": false
}
} backup-create-response.json 0000664 0000000 0000000 00000001015 15131732575 0032012 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups {
"backup": {
"id": "b1f41f9b-741e-4992-a246-b97de7e6e87e",
"links": [
{
"href": "http://127.0.0.1:40797/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/b1f41f9b-741e-4992-a246-b97de7e6e87e",
"rel": "self"
},
{
"href": "http://127.0.0.1:40797/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/b1f41f9b-741e-4992-a246-b97de7e6e87e",
"rel": "bookmark"
}
],
"name": "backup001"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/backup-show-response.json 0000664 0000000 0000000 00000002036 15131732575 0031612 0 ustar 00root root 0000000 0000000 {
"backup": {
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:02.509831",
"data_timestamp": "2023-06-23T11:56:02.509831",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "6a122f4b-d2f6-448f-aeb5-68bae5ff8358",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:46627/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6a122f4b-d2f6-448f-aeb5-68bae5ff8358",
"rel": "self"
},
{
"href": "http://127.0.0.1:46627/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6a122f4b-d2f6-448f-aeb5-68bae5ff8358",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 0,
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:02.512426",
"volume_id": "49a784cf-b759-4594-acdf-5238ee50976b"
}
} backups-list-detailed-response.json 0000664 0000000 0000000 00000002233 15131732575 0033461 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups {
"backups": [
{
"availability_zone": null,
"container": null,
"created_at": "2023-07-10T13:23:21.178739",
"data_timestamp": "2023-07-10T13:23:21.178739",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "7ab823f7-1174-4447-9a76-863ae2dcf372",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:44197/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/7ab823f7-1174-4447-9a76-863ae2dcf372",
"rel": "self"
},
{
"href": "http://127.0.0.1:44197/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/7ab823f7-1174-4447-9a76-863ae2dcf372",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 0,
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-07-10T13:23:21.189552",
"volume_id": "9fc31617-303d-4b52-826e-b598cca40419"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/backups-list-response.json0000664 0000000 0000000 00000001122 15131732575 0031763 0 ustar 00root root 0000000 0000000 {
"backups": [
{
"id": "c26d9897-cace-44cc-ad0f-3a0d0b6d1450",
"links": [
{
"href": "http://127.0.0.1:46803/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/c26d9897-cace-44cc-ad0f-3a0d0b6d1450",
"rel": "self"
},
{
"href": "http://127.0.0.1:46803/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/c26d9897-cace-44cc-ad0f-3a0d0b6d1450",
"rel": "bookmark"
}
],
"name": "backup001"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.18/ 0000775 0000000 0000000 00000000000 15131732575 0025416 5 ustar 00root root 0000000 0000000 backup-create-response.json 0000664 0000000 0000000 00000001015 15131732575 0032571 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.18 {
"backup": {
"id": "73c2b8d8-e658-4396-a804-e1960b9330f9",
"links": [
{
"href": "http://127.0.0.1:34439/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/73c2b8d8-e658-4396-a804-e1960b9330f9",
"rel": "self"
},
{
"href": "http://127.0.0.1:34439/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/73c2b8d8-e658-4396-a804-e1960b9330f9",
"rel": "bookmark"
}
],
"name": "backup001"
}
} backup-show-response.json 0000664 0000000 0000000 00000002163 15131732575 0032313 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.18 {
"backup": {
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:06.577029",
"data_timestamp": "2023-06-23T11:56:06.577029",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "606b1a40-65c3-40aa-aa35-bbaddf3b0cdc",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:40731/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/606b1a40-65c3-40aa-aa35-bbaddf3b0cdc",
"rel": "self"
},
{
"href": "http://127.0.0.1:40731/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/606b1a40-65c3-40aa-aa35-bbaddf3b0cdc",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:06.579796",
"volume_id": "5c4f87bc-031c-455b-b936-bfedb85a1d24"
}
} backups-list-detailed-response.json 0000664 0000000 0000000 00000002364 15131732575 0034245 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.18 {
"backups": [
{
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:55:59.754975",
"data_timestamp": "2023-06-23T11:55:59.754975",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "89881aac-2ce3-476b-bb0f-23c440a5e141",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:37207/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/89881aac-2ce3-476b-bb0f-23c440a5e141",
"rel": "self"
},
{
"href": "http://127.0.0.1:37207/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/89881aac-2ce3-476b-bb0f-23c440a5e141",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:55:59.759269",
"volume_id": "66eda5bf-7163-4316-a0b5-afb14c43625b"
}
]
} backups-list-response.json 0000664 0000000 0000000 00000001122 15131732575 0032463 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.18 {
"backups": [
{
"id": "782c1178-79b7-4caf-845b-c226cf288ca0",
"links": [
{
"href": "http://127.0.0.1:36723/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/782c1178-79b7-4caf-845b-c226cf288ca0",
"rel": "self"
},
{
"href": "http://127.0.0.1:36723/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/782c1178-79b7-4caf-845b-c226cf288ca0",
"rel": "bookmark"
}
],
"name": "backup001"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.43/ 0000775 0000000 0000000 00000000000 15131732575 0025414 5 ustar 00root root 0000000 0000000 backup-create-response.json 0000664 0000000 0000000 00000001015 15131732575 0032567 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.43 {
"backup": {
"id": "992835c9-4ea4-4433-aa1d-c8725c041af2",
"links": [
{
"href": "http://127.0.0.1:38909/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/992835c9-4ea4-4433-aa1d-c8725c041af2",
"rel": "self"
},
{
"href": "http://127.0.0.1:38909/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/992835c9-4ea4-4433-aa1d-c8725c041af2",
"rel": "bookmark"
}
],
"name": "backup001"
}
} backup-show-response.json 0000664 0000000 0000000 00000002213 15131732575 0032305 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.43 {
"backup": {
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:04.957710",
"data_timestamp": "2023-06-23T11:56:04.957710",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "bb512d13-a64c-4793-b153-939b8c9b638f",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:45785/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/bb512d13-a64c-4793-b153-939b8c9b638f",
"rel": "self"
},
{
"href": "http://127.0.0.1:45785/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/bb512d13-a64c-4793-b153-939b8c9b638f",
"rel": "bookmark"
}
],
"metadata": {},
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:04.960494",
"volume_id": "f9f36c56-29a0-46a1-88c1-9bcf45fb271b"
}
} backups-list-detailed-response.json 0000664 0000000 0000000 00000002420 15131732575 0034234 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.43 {
"backups": [
{
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:55:56.808833",
"data_timestamp": "2023-06-23T11:55:56.808833",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "cafabbef-cf1d-45a4-95c0-7395f30fd334",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:34215/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/cafabbef-cf1d-45a4-95c0-7395f30fd334",
"rel": "self"
},
{
"href": "http://127.0.0.1:34215/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/cafabbef-cf1d-45a4-95c0-7395f30fd334",
"rel": "bookmark"
}
],
"metadata": {},
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:55:56.811458",
"volume_id": "aa4f5314-143f-4ad9-8677-17d52032f943"
}
]
} backups-list-response.json 0000664 0000000 0000000 00000001122 15131732575 0032461 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.43 {
"backups": [
{
"id": "6fea7c87-7c93-4670-b74d-97319d71f95a",
"links": [
{
"href": "http://127.0.0.1:46541/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6fea7c87-7c93-4670-b74d-97319d71f95a",
"rel": "self"
},
{
"href": "http://127.0.0.1:46541/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6fea7c87-7c93-4670-b74d-97319d71f95a",
"rel": "bookmark"
}
],
"name": "backup001"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.45/ 0000775 0000000 0000000 00000000000 15131732575 0025416 5 ustar 00root root 0000000 0000000 backup-create-response.json 0000664 0000000 0000000 00000001015 15131732575 0032571 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.45 {
"backup": {
"id": "ca97fe1d-8d8c-4b97-8439-f8bcfe5fe048",
"links": [
{
"href": "http://127.0.0.1:40345/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/ca97fe1d-8d8c-4b97-8439-f8bcfe5fe048",
"rel": "self"
},
{
"href": "http://127.0.0.1:40345/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/ca97fe1d-8d8c-4b97-8439-f8bcfe5fe048",
"rel": "bookmark"
}
],
"name": "backup001"
}
} backup-show-response.json 0000664 0000000 0000000 00000002213 15131732575 0032307 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.45 {
"backup": {
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:07.334265",
"data_timestamp": "2023-06-23T11:56:07.334265",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "5edd3373-8fae-4ae5-a63f-7282df75b2f8",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:33005/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/5edd3373-8fae-4ae5-a63f-7282df75b2f8",
"rel": "self"
},
{
"href": "http://127.0.0.1:33005/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/5edd3373-8fae-4ae5-a63f-7282df75b2f8",
"rel": "bookmark"
}
],
"metadata": {},
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:07.350705",
"volume_id": "164476de-38ba-44a3-b00c-78624a5256ff"
}
} backups-list-detailed-response.json 0000664 0000000 0000000 00000002440 15131732575 0034240 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.45 {
"backups": [
{
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:55:59.217859",
"data_timestamp": "2023-06-23T11:55:59.217859",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "3287d2a2-38fb-4a62-b9c4-d0faf601650c",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:46657/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3287d2a2-38fb-4a62-b9c4-d0faf601650c",
"rel": "self"
},
{
"href": "http://127.0.0.1:46657/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3287d2a2-38fb-4a62-b9c4-d0faf601650c",
"rel": "bookmark"
}
],
"metadata": {},
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:55:59.221858",
"volume_id": "cc41abad-350c-45c2-a39b-82f3f891a954"
}
],
"count": 1
} backups-list-response.json 0000664 0000000 0000000 00000001142 15131732575 0032465 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.45 {
"backups": [
{
"id": "0bc2fd0c-2727-440f-945e-97f653bf3cad",
"links": [
{
"href": "http://127.0.0.1:46279/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/0bc2fd0c-2727-440f-945e-97f653bf3cad",
"rel": "self"
},
{
"href": "http://127.0.0.1:46279/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/0bc2fd0c-2727-440f-945e-97f653bf3cad",
"rel": "bookmark"
}
],
"name": "backup001"
}
],
"count": 1
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.56/ 0000775 0000000 0000000 00000000000 15131732575 0025420 5 ustar 00root root 0000000 0000000 backup-create-response.json 0000664 0000000 0000000 00000001015 15131732575 0032573 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.56 {
"backup": {
"id": "15b73866-f643-407d-9c53-377d9eb3e3fc",
"links": [
{
"href": "http://127.0.0.1:38593/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/15b73866-f643-407d-9c53-377d9eb3e3fc",
"rel": "self"
},
{
"href": "http://127.0.0.1:38593/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/15b73866-f643-407d-9c53-377d9eb3e3fc",
"rel": "bookmark"
}
],
"name": "backup001"
}
} backup-show-response.json 0000664 0000000 0000000 00000002306 15131732575 0032314 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.56 {
"backup": {
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:08.691468",
"data_timestamp": "2023-06-23T11:56:08.691468",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "3052c307-119e-4f78-960e-972078aa15a8",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:38135/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3052c307-119e-4f78-960e-972078aa15a8",
"rel": "self"
},
{
"href": "http://127.0.0.1:38135/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3052c307-119e-4f78-960e-972078aa15a8",
"rel": "bookmark"
}
],
"metadata": {},
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:08.693488",
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_id": "e763a78b-edc5-48fb-bbb3-fddc1062e27a"
}
} backups-list-detailed-response.json 0000664 0000000 0000000 00000002537 15131732575 0034251 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.56 {
"backups": [
{
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:02.012007",
"data_timestamp": "2023-06-23T11:56:02.012007",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "72915888-cfcb-4f41-a416-bab824f3e8ba",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:34501/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/72915888-cfcb-4f41-a416-bab824f3e8ba",
"rel": "self"
},
{
"href": "http://127.0.0.1:34501/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/72915888-cfcb-4f41-a416-bab824f3e8ba",
"rel": "bookmark"
}
],
"metadata": {},
"name": "backup001",
"object_count": 0,
"os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:02.014872",
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_id": "9de8bbf6-015e-4ccd-a484-1c93acc85f60"
}
],
"count": 1
} backups-list-response.json 0000664 0000000 0000000 00000001142 15131732575 0032467 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.56 {
"backups": [
{
"id": "eb129a85-fba3-4164-9a5e-9c3394b97810",
"links": [
{
"href": "http://127.0.0.1:40523/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/eb129a85-fba3-4164-9a5e-9c3394b97810",
"rel": "self"
},
{
"href": "http://127.0.0.1:40523/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/eb129a85-fba3-4164-9a5e-9c3394b97810",
"rel": "bookmark"
}
],
"name": "backup001"
}
],
"count": 1
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.9/ 0000775 0000000 0000000 00000000000 15131732575 0025336 5 ustar 00root root 0000000 0000000 backup-create-response.json 0000664 0000000 0000000 00000001015 15131732575 0032511 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.9 {
"backup": {
"id": "41f7183c-a53d-4690-a7a9-b46f5bb1acbd",
"links": [
{
"href": "http://127.0.0.1:34865/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/41f7183c-a53d-4690-a7a9-b46f5bb1acbd",
"rel": "self"
},
{
"href": "http://127.0.0.1:34865/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/41f7183c-a53d-4690-a7a9-b46f5bb1acbd",
"rel": "bookmark"
}
],
"name": "backup001"
}
} backup-show-response.json 0000664 0000000 0000000 00000002036 15131732575 0032232 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.9 {
"backup": {
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:09.563928",
"data_timestamp": "2023-06-23T11:56:09.563928",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "3a6b5767-358c-4185-bcda-95b401fa3893",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:36513/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3a6b5767-358c-4185-bcda-95b401fa3893",
"rel": "self"
},
{
"href": "http://127.0.0.1:36513/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3a6b5767-358c-4185-bcda-95b401fa3893",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 0,
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:09.567593",
"volume_id": "9cfc0bc4-cf52-45c2-b461-502ae375e2a7"
}
} backup-update-request.json 0000664 0000000 0000000 00000000136 15131732575 0032365 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.9 {
"backup":{
"name":"backup001",
"description": "this is a backup"
}
} backup-update-response.json 0000664 0000000 0000000 00000001015 15131732575 0032530 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.9 {
"backup": {
"id": "06d5db4f-1f80-4a71-99a6-99368cfb8f8e",
"links": [
{
"href": "http://127.0.0.1:45187/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/06d5db4f-1f80-4a71-99a6-99368cfb8f8e",
"rel": "self"
},
{
"href": "http://127.0.0.1:45187/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/06d5db4f-1f80-4a71-99a6-99368cfb8f8e",
"rel": "bookmark"
}
],
"name": "backup001"
}
} backups-list-detailed-response.json 0000664 0000000 0000000 00000002233 15131732575 0034160 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.9 {
"backups": [
{
"availability_zone": null,
"container": null,
"created_at": "2023-06-23T11:56:04.395991",
"data_timestamp": "2023-06-23T11:56:04.395991",
"description": "Test backup",
"fail_reason": null,
"has_dependent_backups": false,
"id": "a3469ffa-acb3-427d-b31f-1c93c96b009f",
"is_incremental": false,
"links": [
{
"href": "http://127.0.0.1:43581/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/a3469ffa-acb3-427d-b31f-1c93c96b009f",
"rel": "self"
},
{
"href": "http://127.0.0.1:43581/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/a3469ffa-acb3-427d-b31f-1c93c96b009f",
"rel": "bookmark"
}
],
"name": "backup001",
"object_count": 0,
"size": 10,
"snapshot_id": null,
"status": "creating",
"updated_at": "2023-06-23T11:56:04.398251",
"volume_id": "b894eba0-506d-4019-b7b2-8508605017ba"
}
]
} backups-list-response.json 0000664 0000000 0000000 00000001122 15131732575 0032403 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/backups/v3.9 {
"backups": [
{
"id": "f08aaa97-3644-4e46-9e0b-7cddce86db9c",
"links": [
{
"href": "http://127.0.0.1:33071/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/f08aaa97-3644-4e46-9e0b-7cddce86db9c",
"rel": "self"
},
{
"href": "http://127.0.0.1:33071/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/f08aaa97-3644-4e46-9e0b-7cddce86db9c",
"rel": "bookmark"
}
],
"name": "backup001"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/cgsnapshots-create-request.json 0000664 0000000 0000000 00000000317 15131732575 0031366 0 ustar 00root root 0000000 0000000 {
"cgsnapshot": {
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814546",
"name": "firstcg",
"description": "first consistency group",
"status": "creating"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/cgsnapshots-create-response.json 0000664 0000000 0000000 00000000156 15131732575 0031535 0 ustar 00root root 0000000 0000000 {
"cgsnapshot": {
"id": "6f519a48-3183-46cf-a32f-41815f816666",
"name": "firstcg"
}
}
cgsnapshots-list-detailed-response.json 0000664 0000000 0000000 00000001250 15131732575 0032733 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"cgsnapshots": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "available",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"consistencygroup_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd",
"status": "error",
"created_at": "2015-09-16T09:31:15.000000",
"name": "my-cg2",
"description": "Edited description"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/cgsnapshots-list-response.json 0000664 0000000 0000000 00000000366 15131732575 0031250 0 ustar 00root root 0000000 0000000 {
"cgsnapshots": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"name": "my-cg1"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"name": "my-cg2"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/cgsnapshots-show-response.json 0000664 0000000 0000000 00000000474 15131732575 0031255 0 ustar 00root root 0000000 0000000 {
"cgsnapshot": {
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "available",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/ 0000775 0000000 0000000 00000000000 15131732575 0025053 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7/ 0000775 0000000 0000000 00000000000 15131732575 0025550 5 ustar 00root root 0000000 0000000 cluster-disable-request.json 0000664 0000000 0000000 00000000144 15131732575 0033133 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7 {
"name": "cluster_name",
"binary": "cinder-volume",
"disabled_reason": "for testing"
}
cluster-disable-response.json 0000664 0000000 0000000 00000000276 15131732575 0033307 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7 {
"cluster": {
"binary": "cinder-volume",
"disabled_reason": "for testing",
"name": "cluster_name",
"state": "down",
"status": "disabled"
}
}
cluster-enable-request.json 0000664 0000000 0000000 00000000076 15131732575 0032762 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7 {
"name": "cluster_name",
"binary": "cinder-volume"
}
cluster-enable-response.json 0000664 0000000 0000000 00000000264 15131732575 0033127 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7 {
"cluster": {
"name": "cluster_name",
"state": "down",
"binary": "cinder-volume",
"status": "enabled",
"disabled_reason": null
}
}
cluster-show-response.json 0000664 0000000 0000000 00000000610 15131732575 0032654 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7 {
"cluster": {
"binary": "cinder-volume",
"created_at": "2016-06-01T02:46:28.000000",
"disabled_reason": null,
"last_heartbeat": "2016-06-01T02:46:28.000000",
"name": "cluster_name",
"num_down_hosts": 0,
"num_hosts": 0,
"state": "down",
"status": "enabled",
"updated_at": "2016-06-01T02:46:28.000000"
}
} clusters-list-detailed-response.json 0000664 0000000 0000000 00000001534 15131732575 0034611 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7 {
"clusters": [
{
"binary": "cinder-volume",
"created_at": "2016-06-01T02:46:28.000000",
"disabled_reason": null,
"last_heartbeat": "2016-06-01T02:46:28.000000",
"name": "cluster_name",
"num_down_hosts": 0,
"num_hosts": 0,
"state": "down",
"status": "enabled",
"updated_at": "2016-06-01T02:46:28.000000"
},
{
"binary": "cinder-volume",
"created_at": "2016-06-01T01:46:28.000000",
"disabled_reason": "for testing",
"last_heartbeat": "",
"name": "cluster2",
"num_down_hosts": 1,
"num_hosts": 2,
"state": "down",
"status": "disabled",
"updated_at": "2016-06-01T01:46:28.000000"
}
]
} clusters-list-response.json 0000664 0000000 0000000 00000000522 15131732575 0033034 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/clusters/v3.7 {
"clusters": [
{
"name": "cluster_name",
"binary": "cinder-volume",
"state": "down",
"status": "enabled"
},
{
"name": "cluster2",
"binary": "cinder-volume",
"state": "down",
"status": "disabled"
}
]
}
consistency-group-create-from-src-request.json 0000664 0000000 0000000 00000000620 15131732575 0034171 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroup-from-src": {
"name": "firstcg",
"description": "first consistency group",
"cgsnapshot_id": "6f519a48-3183-46cf-a32f-41815f813986",
"source_cgid": "6f519a48-3183-46cf-a32f-41815f814546",
"user_id": "6f519a48-3183-46cf-a32f-41815f815555",
"project_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "creating"
}
}
consistency-group-create-request.json 0000664 0000000 0000000 00000000273 15131732575 0032447 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroup": {
"name": "firstcg",
"description": "first consistency group",
"volume_types": "type1,type2",
"availability_zone": "az0"
}
}
consistency-group-create-response.json 0000664 0000000 0000000 00000000505 15131732575 0032613 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroup": {
"status": "error",
"description": "first consistency group",
"availability_zone": "az0",
"created_at": "2016-08-19T19:32:19.000000",
"volume_types": ["type1", "type2"],
"id": "63d1a274-de38-4384-a97e-475306777027",
"name": "firstcg"
}
}
consistency-group-delete-request.json 0000664 0000000 0000000 00000000073 15131732575 0032444 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroup": {
"force": false
}
}
consistency-group-show-response.json 0000664 0000000 0000000 00000000531 15131732575 0032327 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroup": {
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"status": "available",
"availability_zone": "az1",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group",
"volume_types": [
"123456"
]
}
}
consistency-group-update-request.json 0000664 0000000 0000000 00000000332 15131732575 0032462 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroup": {
"name": "my_cg",
"description": "My consistency group",
"add_volumes": "volume-uuid-1,volume-uuid-2",
"remove_volumes": "volume-uuid-8,volume-uuid-9"
}
}
consistency-groups-list-detailed-response.json 0000664 0000000 0000000 00000001364 15131732575 0034263 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroups": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"status": "available",
"availability_zone": "az1",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my-cg1",
"description": "my first consistency group",
"volume_types": [
"123456"
]
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"status": "error",
"availability_zone": "az2",
"created_at": "2015-09-16T09:31:15.000000",
"name": "my-cg2",
"description": "Edited description",
"volume_types": [
"234567"
]
}
]
}
consistency-groups-list-response.json 0000664 0000000 0000000 00000000374 15131732575 0032512 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"consistencygroups": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"name": "my-cg1"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"name": "my-cg2"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/extensions/ 0000775 0000000 0000000 00000000000 15131732575 0025406 5 ustar 00root root 0000000 0000000 extensions-list-response.json 0000664 0000000 0000000 00000017111 15131732575 0033227 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/extensions {
"extensions": [
{
"alias": "os-hosts",
"description": "Admin-only host administration.",
"links": [],
"name": "Hosts",
"updated": "2011-06-29T00:00:00+00:00"
},
{
"alias": "os-vol-tenant-attr",
"description": "Expose the internal project_id as an attribute of a volume.",
"links": [],
"name": "VolumeTenantAttribute",
"updated": "2011-11-03T00:00:00+00:00"
},
{
"alias": "os-quota-sets",
"description": "Quota management support.",
"links": [],
"name": "Quotas",
"updated": "2011-08-08T00:00:00+00:00"
},
{
"alias": "os-availability-zone",
"description": "Describe Availability Zones.",
"links": [],
"name": "AvailabilityZones",
"updated": "2013-06-27T00:00:00+00:00"
},
{
"alias": "os-volume-encryption-metadata",
"description": "Volume encryption metadata retrieval support.",
"links": [],
"name": "VolumeEncryptionMetadata",
"updated": "2013-07-10T00:00:00+00:00"
},
{
"alias": "backups",
"description": "Backups support.",
"links": [],
"name": "Backups",
"updated": "2012-12-12T00:00:00+00:00"
},
{
"alias": "os-snapshot-actions",
"description": "Enable snapshot manager actions.",
"links": [],
"name": "SnapshotActions",
"updated": "2013-07-16T00:00:00+00:00"
},
{
"alias": "os-volume-actions",
"description": "Enable volume actions.",
"links": [],
"name": "VolumeActions",
"updated": "2012-05-31T00:00:00+00:00"
},
{
"alias": "os-snapshot-manage",
"description": "Allows existing backend storage to be 'managed' by Cinder.",
"links": [],
"name": "SnapshotManage",
"updated": "2014-12-31T00:00:00+00:00"
},
{
"alias": "os-volume-unmanage",
"description": "Enable volume unmanage operation.",
"links": [],
"name": "VolumeUnmanage",
"updated": "2012-05-31T00:00:00+00:00"
},
{
"alias": "consistencygroups",
"description": "consistency groups support.",
"links": [],
"name": "Consistencygroups",
"updated": "2014-08-18T00:00:00+00:00"
},
{
"alias": "os-vol-host-attr",
"description": "Expose host as an attribute of a volume.",
"links": [],
"name": "VolumeHostAttribute",
"updated": "2011-11-03T00:00:00+00:00"
},
{
"alias": "encryption",
"description": "Encryption support for volume types.",
"links": [],
"name": "VolumeTypeEncryption",
"updated": "2013-07-01T00:00:00+00:00"
},
{
"alias": "os-vol-image-meta",
"description": "Show image metadata associated with the volume.",
"links": [],
"name": "VolumeImageMetadata",
"updated": "2012-12-07T00:00:00+00:00"
},
{
"alias": "os-types-manage",
"description": "Types manage support.",
"links": [],
"name": "TypesManage",
"updated": "2011-08-24T00:00:00+00:00"
},
{
"alias": "capabilities",
"description": "Capabilities support.",
"links": [],
"name": "Capabilities",
"updated": "2015-08-31T00:00:00+00:00"
},
{
"alias": "cgsnapshots",
"description": "cgsnapshots support.",
"links": [],
"name": "Cgsnapshots",
"updated": "2014-08-18T00:00:00+00:00"
},
{
"alias": "os-types-extra-specs",
"description": "Type extra specs support.",
"links": [],
"name": "TypesExtraSpecs",
"updated": "2011-08-24T00:00:00+00:00"
},
{
"alias": "os-used-limits",
"description": "Provide data on limited resources that are being used.",
"links": [],
"name": "UsedLimits",
"updated": "2013-10-03T00:00:00+00:00"
},
{
"alias": "os-vol-mig-status-attr",
"description": "Expose migration_status as an attribute of a volume.",
"links": [],
"name": "VolumeMigStatusAttribute",
"updated": "2013-08-08T00:00:00+00:00"
},
{
"alias": "os-volume-type-access",
"description": "Volume type access support.",
"links": [],
"name": "VolumeTypeAccess",
"updated": "2014-06-26T00:00:00Z"
},
{
"alias": "os-extended-services",
"description": "Extended services support.",
"links": [],
"name": "ExtendedServices",
"updated": "2014-01-10T00:00:00-00:00"
},
{
"alias": "os-extended-snapshot-attributes",
"description": "Extended SnapshotAttributes support.",
"links": [],
"name": "ExtendedSnapshotAttributes",
"updated": "2012-06-19T00:00:00+00:00"
},
{
"alias": "os-snapshot-unmanage",
"description": "Enable volume unmanage operation.",
"links": [],
"name": "SnapshotUnmanage",
"updated": "2014-12-31T00:00:00+00:00"
},
{
"alias": "qos-specs",
"description": "QoS specs support.",
"links": [],
"name": "Qos_specs_manage",
"updated": "2013-08-02T00:00:00+00:00"
},
{
"alias": "os-quota-class-sets",
"description": "Quota classes management support.",
"links": [],
"name": "QuotaClasses",
"updated": "2012-03-12T00:00:00+00:00"
},
{
"alias": "os-volume-transfer",
"description": "Volume transfer management support.",
"links": [],
"name": "VolumeTransfer",
"updated": "2013-05-29T00:00:00+00:00"
},
{
"alias": "os-volume-manage",
"description": "Allows existing backend storage to be 'managed' by Cinder.",
"links": [],
"name": "VolumeManage",
"updated": "2014-02-10T00:00:00+00:00"
},
{
"alias": "os-admin-actions",
"description": "Enable admin actions.",
"links": [],
"name": "AdminActions",
"updated": "2012-08-25T00:00:00+00:00"
},
{
"alias": "os-services",
"description": "Services support.",
"links": [],
"name": "Services",
"updated": "2012-10-28T00:00:00-00:00"
},
{
"alias": "scheduler-stats",
"description": "Scheduler stats support.",
"links": [],
"name": "Scheduler_stats",
"updated": "2014-09-07T00:00:00+00:00"
},
{
"alias": "OS-SCH-HNT",
"description": "Pass arbitrary key/value pairs to the scheduler.",
"links": [],
"name": "SchedulerHints",
"updated": "2013-04-18T00:00:00+00:00"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/get-default-type-response.json 0000664 0000000 0000000 00000000236 15131732575 0031117 0 ustar 00root root 0000000 0000000 {
"default_type": {
"project_id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"volume_type_id": "40ec6e5e-c9bd-4170-8740-c1cd42d7eabb"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/get-default-types-response.json 0000664 0000000 0000000 00000000507 15131732575 0031303 0 ustar 00root root 0000000 0000000 {
"default_types": [
{
"project_id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"volume_type_id": "40ec6e5e-c9bd-4170-8740-c1cd42d7eabb"
},
{
"project_id": "dd46ea3e-6f3f-4e50-85fa-40c182e25d12",
"volume_type_id": "9fb51b63-3cd4-493f-9380-53d8f0a04bd4"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-create-from-src-request.json0000664 0000000 0000000 00000000315 15131732575 0031712 0 ustar 00root root 0000000 0000000 {
"create-from-src": {
"name": "first_group",
"description": "first group",
"group_snapshot_id": "6f519a48-3183-46cf-a32f-41815f813986",
"source_group_id": null
}
}
group-create-from-src-response.json 0000664 0000000 0000000 00000000155 15131732575 0032003 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"group": {
"id": "6f519a48-3183-46cf-a32f-41815f816666",
"name": "first_group"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-create-request.json 0000664 0000000 0000000 00000000513 15131732575 0030164 0 ustar 00root root 0000000 0000000 {
"group": {
"name": "first_group",
"description": "first group",
"group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1",
"volume_types": [
"4e9e6d23-eed0-426d-b90a-28f87a94b6fe",
"c4daaf47-c530-4901-b28e-f5f0a359c4e6"
],
"availability_zone": "az0"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-create-response.json 0000664 0000000 0000000 00000000155 15131732575 0030334 0 ustar 00root root 0000000 0000000 {
"group": {
"id": "6f519a48-3183-46cf-a32f-41815f816666",
"name": "first_group"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-delete-request.json 0000664 0000000 0000000 00000000072 15131732575 0030163 0 ustar 00root root 0000000 0000000 {
"delete": {
"delete-volumes": false
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-replication-disable.json 0000664 0000000 0000000 00000000042 15131732575 0031142 0 ustar 00root root 0000000 0000000 {
"disable_replication": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-replication-enable.json 0000664 0000000 0000000 00000000041 15131732575 0030764 0 ustar 00root root 0000000 0000000 {
"enable_replication": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-replication-failover.json 0000664 0000000 0000000 00000000175 15131732575 0031355 0 ustar 00root root 0000000 0000000 {
"failover_replication": {
"allow_attached_volume": true,
"secondary_backend_id": "vendor-id-1"
}
}
group-replication-list-targets.json 0000664 0000000 0000000 00000000047 15131732575 0032107 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"list_replication_targets": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-replication-target.json 0000664 0000000 0000000 00000000153 15131732575 0031030 0 ustar 00root root 0000000 0000000 {
"replication_targets": {
"backend_id": "vendor-id-1",
"unique_key": "value1"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-reset-status-request.json 0000664 0000000 0000000 00000000075 15131732575 0031367 0 ustar 00root root 0000000 0000000 {
"reset_status": {
"status": "available"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-show-response.json 0000664 0000000 0000000 00000001136 15131732575 0030051 0 ustar 00root root 0000000 0000000 {
"group": {
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"status": "available",
"availability_zone": "az1",
"created_at": "2015-09-16T09:28:52.000000",
"name": "first_group",
"description": "my first group",
"group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1",
"volume_types": [
"c4daaf47-c530-4901-b28e-f5f0a359c4e6"
],
"volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"],
"group_snapshot_id": null,
"source_group_id": null,
"project_id": "7ccf4863071f44aeb8f141f65780c51b"
}
}
group-snapshot-reset-status-request.json 0000664 0000000 0000000 00000000075 15131732575 0033145 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"reset_status": {
"status": "available"
}
} group-snapshots-create-request.json 0000664 0000000 0000000 00000000264 15131732575 0032130 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"group_snapshot": {
"group_id": "6f519a48-3183-46cf-a32f-41815f814546",
"name": "first_group_snapshot",
"description": "first group snapshot"
}
}
group-snapshots-create-response.json 0000664 0000000 0000000 00000000300 15131732575 0032265 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"group_snapshot": {
"id": "6f519a48-3183-46cf-a32f-41815f816666",
"name": "first_group_snapshot",
"group_type_id": "58737af7-786b-48b7-ab7c-2447e74b0ef4"
}
}
group-snapshots-list-detailed-response.json 0000664 0000000 0000000 00000001661 15131732575 0033561 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"group_snapshots": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"group_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "available",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my_group_snapshot1",
"description": "my first group snapshot",
"group_type_id": "0ef094a2-d9fd-4c79-acfd-ac60a0506b7d",
"project_id": "7ccf4863071f44aeb8f141f65780c51b"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"group_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd",
"status": "error",
"created_at": "2015-09-16T09:31:15.000000",
"name": "my_group_snapshot2",
"description": "Edited description",
"group_type_id": "7270c56e-6354-4528-8e8b-f54dee2232c8",
"project_id": "7ccf4863071f44aeb8f141f65780c51b"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-snapshots-list-response.json0000664 0000000 0000000 00000000422 15131732575 0032061 0 ustar 00root root 0000000 0000000 {
"group_snapshots": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"name": "my_group_snapshot1"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"name": "my_group_snapshot2"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-snapshots-show-response.json0000664 0000000 0000000 00000000671 15131732575 0032074 0 ustar 00root root 0000000 0000000 {
"group_snapshot": {
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"group_id": "6f519a48-3183-46cf-a32f-41815f814444",
"status": "available",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my_group_snapshot1",
"description": "my first group snapshot",
"group_type_id": "7270c56e-6354-4528-8e8b-f54dee2232c8",
"project_id": "7ccf4863071f44aeb8f141f65780c51b"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-type-create-request.json 0000664 0000000 0000000 00000000344 15131732575 0031145 0 ustar 00root root 0000000 0000000 {
"group_type": {
"name": "grp-type-001",
"description": "group type 0001",
"is_public": true,
"group_specs": {
"consistent_group_snapshot_enabled": " False"
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-type-default-response.json 0000664 0000000 0000000 00000000331 15131732575 0031470 0 ustar 00root root 0000000 0000000 {
"group_type": {
"id": "7270c56e-6354-4528-8e8b-f54dee2232c8",
"name": "group-type-test",
"description": "default group type",
"is_public": true,
"group_specs": {}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-type-show-response.json 0000664 0000000 0000000 00000000431 15131732575 0031025 0 ustar 00root root 0000000 0000000 {
"group_type": {
"id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"name": "grp-type-001",
"description": "group type 001",
"is_public": true,
"group_specs": {
"consistent_group_snapshot_enabled": " False"
}
}
}
group-type-specs-create-request.json 0000664 0000000 0000000 00000000122 15131732575 0032173 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"group_specs": {
"key1": "value1",
"key2": "value2"
}
}
group-type-specs-create-response.json 0000664 0000000 0000000 00000000122 15131732575 0032341 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"group_specs": {
"key1": "value1",
"key2": "value2"
}
}
group-type-specs-list-response.json 0000664 0000000 0000000 00000000122 15131732575 0032051 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"group_specs": {
"key1": "value1",
"key2": "value2"
}
}
group-type-specs-show-response.json 0000664 0000000 0000000 00000000031 15131732575 0032055 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"key1": "value1"
}
group-type-specs-update-request.json 0000664 0000000 0000000 00000000031 15131732575 0032211 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"key1": "value1"
}
group-type-specs-update-response.json 0000664 0000000 0000000 00000000031 15131732575 0032357 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"key1": "value1"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-type-update-request.json 0000664 0000000 0000000 00000000202 15131732575 0031155 0 ustar 00root root 0000000 0000000 {
"group_type": {
"name": "grp-type-001",
"description": "group type 0001",
"is_public": true
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-types-list-response.json 0000664 0000000 0000000 00000001133 15131732575 0031203 0 ustar 00root root 0000000 0000000 {
"group_types": [
{
"is_public": true,
"group_specs": {
"consistent_group_snapshot_enabled": " False"
},
"id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"name": "group_type1",
"description": "tempest-group-type-description-1261576824"
},
{
"is_public": true,
"group_specs": {},
"id": "8eb69a46-df97-4e41-9586-9a40a7533803",
"name": "group_type2",
"description": "tempest-group-type-description-3927295731"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/group-update-request.json 0000664 0000000 0000000 00000000306 15131732575 0030203 0 ustar 00root root 0000000 0000000 {
"group": {
"name": "my_group",
"description": "My group",
"add_volumes": "volume-uuid-1,volume-uuid-2",
"remove_volumes": "volume-uuid-8,volume-uuid-9"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/groups-list-detailed-response.json0000664 0000000 0000000 00000002331 15131732575 0031776 0 ustar 00root root 0000000 0000000 {
"groups": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"status": "available",
"availability_zone": "az1",
"created_at": "2015-09-16T09:28:52.000000",
"name": "my_group1",
"description": "my first group",
"group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1",
"volume_types": [
"4e9e6d23-eed0-426d-b90a-28f87a94b6fe",
"a3d55d15-eeb1-4816-ada9-bf82decc09b3"
],
"volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"],
"project_id": "7ccf4863071f44aeb8f141f65780c51b"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"status": "error",
"availability_zone": "az2",
"created_at": "2015-09-16T09:31:15.000000",
"name": "my_group2",
"description": "Edited description",
"group_type": "f8645498-1323-47a2-9442-5c57724d2e3c",
"volume_types": [
"c4daaf47-c530-4901-b28e-f5f0a359c4e6"
],
"volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"],
"project_id": "7ccf4863071f44aeb8f141f65780c51b"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/groups-list-response.json 0000664 0000000 0000000 00000000367 15131732575 0030234 0 ustar 00root root 0000000 0000000 {
"groups": [
{
"id": "6f519a48-3183-46cf-a32f-41815f813986",
"name": "my_group1"
},
{
"id": "aed36625-a6d7-4681-ba59-c7ba3d18c148",
"name": "my_group2"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/host-attach-request.json 0000664 0000000 0000000 00000000074 15131732575 0030010 0 ustar 00root root 0000000 0000000 {
"os-attach": {
"host_name": "my_host"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/hosts-get-response.json 0000664 0000000 0000000 00000001166 15131732575 0027657 0 ustar 00root root 0000000 0000000 {
"host": [{
"resource": {
"volume_count": "8",
"total_volume_gb": "11",
"total_snapshot_gb": "1",
"project": "(total)",
"host": "node1@rbd-sas",
"snapshot_count": "1"
}
},
{
"resource": {
"volume_count": "8",
"total_volume_gb": "11",
"total_snapshot_gb": "1",
"project": "f21a9c86d7114bf99c711f4874d80474",
"host": "node1@rbd-sas",
"snapshot_count": "1"
}
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/hosts-list-response.json 0000664 0000000 0000000 00000001307 15131732575 0030050 0 ustar 00root root 0000000 0000000 {
"hosts": [{
"service-status": "available",
"service": "cinder-backup",
"zone": "nova",
"service-state": "enabled",
"host_name": "node1",
"last-update": "2017-03-09T21:38:41.000000"
},
{
"service-status": "available",
"service": "cinder-scheduler",
"zone": "nova",
"service-state": "enabled",
"host_name": "node1",
"last-update": "2017-03-09T21:38:38.000000"
},
{
"service-status": "available",
"service": "cinder-volume",
"zone": "nova",
"service-state": "enabled",
"host_name": "node1@lvm",
"last-update": "2017-03-09T21:38:35.000000"
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/image-metadata-show-request.json 0000664 0000000 0000000 00000000045 15131732575 0031405 0 ustar 00root root 0000000 0000000 {
"os-show_image_metadata": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/image-metadata-show-response.json 0000664 0000000 0000000 00000000116 15131732575 0031552 0 ustar 00root root 0000000 0000000 {
"metadata": {
"key1": "value1",
"key2": "value2"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/limits/ 0000775 0000000 0000000 00000000000 15131732575 0024510 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/limits/limits-show-response.json 0000664 0000000 0000000 00000000715 15131732575 0031521 0 ustar 00root root 0000000 0000000 {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/messages-list-response.json 0000664 0000000 0000000 00000003431 15131732575 0030517 0 ustar 00root root 0000000 0000000 {
"messages": [{
"request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a",
"links": [
{
"href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42",
"rel": "self"
},
{
"href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42",
"rel": "bookmark"
}
],
"message_level": "ERROR",
"event_id": "VOLUME_000002",
"created_at": "2014-10-28T00:00:00-00:00",
"guaranteed_until": "2014-10-28T00:00:00-00:00",
"resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4cacf",
"id": "c506cd4b-9048-43bc-97ef-0d7dec369b42",
"resource_type": "VOLUME",
"user_message": "No storage could be allocated for this volume request."
},{
"request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a",
"links": [
{
"href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42",
"rel": "self"
},
{
"href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42",
"rel": "bookmark"
}
],
"message_level": "ERROR",
"event_id": "VOLUME_000002",
"created_at": "2014-10-28T00:00:00-00:00",
"guaranteed_until": "2014-10-28T00:00:00-00:00",
"resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4df4e",
"id": "c506cd4b-9048-43bc-97ef-0d7dec36d5gt",
"resource_type": "VOLUME",
"user_message": "No storage could be allocated for this volume request."
}]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/messages-show-response.json 0000664 0000000 0000000 00000001624 15131732575 0030526 0 ustar 00root root 0000000 0000000 {
"message": {
"request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a",
"links": [
{
"href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42",
"rel": "self"
},
{
"href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42",
"rel": "bookmark"
}
],
"message_level": "ERROR",
"event_id": "VOLUME_000002",
"created_at": "2014-10-28T00:00:00-00:00",
"guaranteed_until": "2014-10-28T00:00:00-00:00",
"resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4cacf",
"id": "c506cd4b-9048-43bc-97ef-0d7dec369b42",
"resource_type": "VOLUME",
"user_message": "No storage could be allocated for this volume request."
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services/ 0000775 0000000 0000000 00000000000 15131732575 0025451 5 ustar 00root root 0000000 0000000 service-disable-log-reason-request.json 0000664 0000000 0000000 00000000126 15131732575 0035057 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"host": "host1",
"binary": "cinder-volume",
"disabled_reason": "test2"
} service-disable-log-reason-response.json 0000664 0000000 0000000 00000000231 15131732575 0035222 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"binary": "cinder-volume",
"disabled": true,
"disabled_reason": "test2",
"host": "host1",
"service": "",
"status": "disabled"
} service-disable-request.json 0000664 0000000 0000000 00000000066 15131732575 0033016 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"host": "host1",
"binary": "cinder-volume"
} service-disable-response.json 0000664 0000000 0000000 00000000171 15131732575 0033161 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"binary": "cinder-volume",
"disabled": true,
"host": "host1",
"service": "",
"status": "disabled"
} service-enable-request.json 0000664 0000000 0000000 00000000066 15131732575 0032641 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"host": "host1",
"binary": "cinder-volume"
} service-enable-response.json 0000664 0000000 0000000 00000000171 15131732575 0033004 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"binary": "cinder-volume",
"disabled": false,
"host": "host1",
"service": "",
"status": "enabled"
} service-failover-host-request.json 0000664 0000000 0000000 00000000027 15131732575 0034172 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"host": "host1"
} service-freeze-request.json 0000664 0000000 0000000 00000000027 15131732575 0032670 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"host": "host1"
} service-thaw-request.json 0000664 0000000 0000000 00000000027 15131732575 0032353 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services {
"host": "host1"
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services/v3.32/ 0000775 0000000 0000000 00000000000 15131732575 0026224 5 ustar 00root root 0000000 0000000 service-get-log-request.json 0000664 0000000 0000000 00000000146 15131732575 0033523 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services/v3.32 {
"binary": "cinder-volume",
"server": "devstack@lvmdriver-1",
"prefix": "cinder.volume"
} service-get-log-response.json 0000664 0000000 0000000 00000001040 15131732575 0033663 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services/v3.32 {
"log_levels": [
{
"binary": "cinder-volume",
"host": "host2",
"levels": {
"cinder.volume.api": "DEBUG"
}
},
{
"binary": "cinder-volume",
"host": "host2",
"levels": {
"cinder.volume.api": "DEBUG"
}
},
{
"binary": "cinder-volume",
"host": "host2",
"levels": {
"cinder.volume.api": "DEBUG"
}
}
]
} service-set-log-request.json 0000664 0000000 0000000 00000000174 15131732575 0033540 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services/v3.32 {
"binary": "cinder-volume",
"server": "devstack@lvmdriver-1",
"prefix": "cinder.volume",
"level": "ERROR"
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services/v3.49/ 0000775 0000000 0000000 00000000000 15131732575 0026234 5 ustar 00root root 0000000 0000000 services-list-response.json 0000664 0000000 0000000 00000005253 15131732575 0033505 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/os-services/v3.49 {
"services": [
{
"binary": "cinder-scheduler",
"cluster": null,
"disabled_reason": "test1",
"host": "host1",
"state": "down",
"status": "disabled",
"updated_at": "2012-10-29T13:42:02.000000",
"zone": "cinder"
},
{
"active_backend_id": null,
"backend_state": null,
"binary": "cinder-volume",
"cluster": null,
"disabled_reason": "test2",
"frozen": false,
"host": "host1",
"replication_status": null,
"state": "down",
"status": "disabled",
"updated_at": "2012-10-29T13:42:05.000000",
"zone": "cinder"
},
{
"binary": "cinder-scheduler",
"cluster": "cluster1",
"disabled_reason": "",
"host": "host2",
"state": "down",
"status": "enabled",
"updated_at": "2012-09-19T06:55:34.000000",
"zone": "cinder"
},
{
"active_backend_id": null,
"backend_state": null,
"binary": "cinder-volume",
"cluster": "cluster1",
"disabled_reason": "test4",
"frozen": false,
"host": "host2",
"replication_status": null,
"state": "down",
"status": "disabled",
"updated_at": "2012-09-18T08:03:38.000000",
"zone": "cinder"
},
{
"active_backend_id": null,
"backend_state": null,
"binary": "cinder-volume",
"cluster": "cluster2",
"disabled_reason": "test5",
"frozen": false,
"host": "host2",
"replication_status": null,
"state": "down",
"status": "disabled",
"updated_at": "2012-10-29T13:42:05.000000",
"zone": "cinder"
},
{
"active_backend_id": null,
"backend_state": null,
"binary": "cinder-volume",
"cluster": "cluster2",
"disabled_reason": "",
"frozen": false,
"host": "host2",
"replication_status": null,
"state": "down",
"status": "enabled",
"updated_at": "2012-09-18T08:03:38.000000",
"zone": "cinder"
},
{
"binary": "cinder-scheduler",
"cluster": null,
"disabled_reason": "",
"host": "host2",
"state": "down",
"status": "enabled",
"updated_at": null,
"zone": "cinder"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/pools-list-detailed-response.json 0000664 0000000 0000000 00000001627 15131732575 0031622 0 ustar 00root root 0000000 0000000 {
"pools": [
{
"name": "pool1",
"capabilities": {
"updated": "2014-10-28T00:00:00-00:00",
"total_capacity_gb": 1024,
"free_capacity_gb": 100,
"volume_backend_name": "pool1",
"reserved_percentage": 0,
"driver_version": "1.0.0",
"storage_protocol": "iSCSI",
"QoS_support": false
}
},
{
"name": "pool2",
"capabilities": {
"updated": "2014-10-28T00:00:00-00:00",
"total_capacity_gb": 512,
"free_capacity_gb": 200,
"volume_backend_name": "pool2",
"reserved_percentage": 0,
"driver_version": "1.0.1",
"storage_protocol": "iSER",
"QoS_support": true
}
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/ 0000775 0000000 0000000 00000000000 15131732575 0024011 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos-create-request.json 0000664 0000000 0000000 00000000100 15131732575 0030424 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"name": "reliability-spec"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos-create-response.json 0000664 0000000 0000000 00000001061 15131732575 0030601 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"specs": {},
"consumer": "back-end",
"name": "reliability-spec",
"id": "599ef437-1c99-42ec-9fc6-239d0519fef1"
},
"links": [
{
"href": "http://23.253.248.171:8776/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/599ef437-1c99-42ec-9fc6-239d0519fef1",
"rel": "self"
},
{
"href": "http://23.253.248.171:8776/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/599ef437-1c99-42ec-9fc6-239d0519fef1",
"rel": "bookmark"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos-list-response.json 0000664 0000000 0000000 00000000316 15131732575 0030313 0 ustar 00root root 0000000 0000000 {
"qos_specs": [
{
"consumer": "back-end",
"id": "62c17294-2e52-4877-a01f-a30388749d9d",
"name": "reliability-spec",
"specs": {}
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos-show-response.json 0000664 0000000 0000000 00000001061 15131732575 0030316 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"specs": {},
"consumer": "back-end",
"name": "reliability-spec",
"id": "0388d6c6-d5d4-42a3-b289-95205c50dd15"
},
"links": [
{
"href": "http://23.253.228.211:8776/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/0388d6c6-d5d4-42a3-b289-95205c50dd15",
"rel": "self"
},
{
"href": "http://23.253.228.211:8776/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/0388d6c6-d5d4-42a3-b289-95205c50dd15",
"rel": "bookmark"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos-unset-request.json 0000664 0000000 0000000 00000000047 15131732575 0030331 0 ustar 00root root 0000000 0000000 {
"keys": [
"key1"
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos-update-request.json 0000664 0000000 0000000 00000000062 15131732575 0030452 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"delay": "1"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos-update-response.json 0000664 0000000 0000000 00000000062 15131732575 0030620 0 ustar 00root root 0000000 0000000 {
"qos_specs": {
"delay": "1"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/qos/qos_show_response.json 0000664 0000000 0000000 00000000036 15131732575 0030463 0 ustar 00root root 0000000 0000000 {
"qos_associations": []
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota-classes-show-response.json 0000664 0000000 0000000 00000000545 15131732575 0031504 0 ustar 00root root 0000000 0000000 {
"quota_class_set": {
"per_volume_gigabytes": -1,
"volumes_lvmdriver-1": -1,
"groups": 10,
"gigabytes": 1000,
"backup_gigabytes": 1000,
"snapshots": 10,
"gigabytes_lvmdriver-1": -1,
"volumes": 10,
"snapshots_lvmdriver-1": -1,
"backups": 10,
"id": "default"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota-classes-update-request.json 0000664 0000000 0000000 00000000170 15131732575 0031632 0 ustar 00root root 0000000 0000000 {
"quota_class_set": {
"volumes_lmv": 10,
"gigabytes_lmv": 1000,
"snapshots_lmv": 10
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota-classes-update-response.json0000664 0000000 0000000 00000000514 15131732575 0032002 0 ustar 00root root 0000000 0000000 {
"quota_class_set": {
"per_volume_gigabytes": -1,
"volumes_lvmdriver-1": -1,
"groups": 10,
"gigabytes": 1000,
"backup_gigabytes": 1000,
"snapshots": 10,
"gigabytes_lvmdriver-1": -1,
"volumes": 10,
"snapshots_lvmdriver-1": -1,
"backups": 10
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_classes/ 0000775 0000000 0000000 00000000000 15131732575 0026055 5 ustar 00root root 0000000 0000000 quota-classes-show-response.json 0000664 0000000 0000000 00000000550 15131732575 0034267 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_classes {
"quota_class_set": {
"backup_gigabytes": 1000,
"backups": 10,
"gigabytes": 1000,
"gigabytes___DEFAULT__": -1,
"groups": 10,
"id": "test_class",
"per_volume_gigabytes": -1,
"snapshots": 10,
"snapshots___DEFAULT__": -1,
"volumes": 10,
"volumes___DEFAULT__": -1
}
} quota-classes-update-request.json 0000664 0000000 0000000 00000000154 15131732575 0034423 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_classes {
"quota_class_set": {
"volumes": 10,
"gigabytes": 1000,
"snapshots": 10
}
} quota-classes-update-response.json 0000664 0000000 0000000 00000000514 15131732575 0034571 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_classes {
"quota_class_set": {
"backup_gigabytes": 1000,
"backups": 10,
"gigabytes": 1000,
"gigabytes___DEFAULT__": -1,
"groups": 10,
"per_volume_gigabytes": -1,
"snapshots": 10,
"snapshots___DEFAULT__": -1,
"volumes": 10,
"volumes___DEFAULT__": -1
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_sets/ 0000775 0000000 0000000 00000000000 15131732575 0025376 5 ustar 00root root 0000000 0000000 quotas-show-defaults-response.json 0000664 0000000 0000000 00000000543 15131732575 0034147 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_sets {
"quota_set": {
"backup_gigabytes": 1000,
"backups": 10,
"gigabytes": 1000,
"gigabytes___DEFAULT__": -1,
"groups": 10,
"id": "fake_tenant",
"per_volume_gigabytes": -1,
"snapshots": 10,
"snapshots___DEFAULT__": -1,
"volumes": 10,
"volumes___DEFAULT__": -1
}
} quotas-show-response.json 0000664 0000000 0000000 00000000543 15131732575 0032342 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_sets {
"quota_set": {
"backup_gigabytes": 1000,
"backups": 10,
"gigabytes": 1000,
"gigabytes___DEFAULT__": -1,
"groups": 10,
"id": "fake_tenant",
"per_volume_gigabytes": -1,
"snapshots": 10,
"snapshots___DEFAULT__": -1,
"volumes": 10,
"volumes___DEFAULT__": -1
}
} quotas-show-usage-response.json 0000664 0000000 0000000 00000002265 15131732575 0033447 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_sets {
"quota_set": {
"backup_gigabytes": {
"in_use": 0,
"limit": 1000,
"reserved": 0
},
"backups": {
"in_use": 0,
"limit": 10,
"reserved": 0
},
"gigabytes": {
"in_use": 0,
"limit": 1000,
"reserved": 0
},
"gigabytes___DEFAULT__": {
"in_use": 0,
"limit": -1,
"reserved": 0
},
"groups": {
"in_use": 0,
"limit": 10,
"reserved": 0
},
"id": "fake_tenant",
"per_volume_gigabytes": {
"in_use": 0,
"limit": -1,
"reserved": 0
},
"snapshots": {
"in_use": 0,
"limit": 10,
"reserved": 0
},
"snapshots___DEFAULT__": {
"in_use": 0,
"limit": -1,
"reserved": 0
},
"volumes": {
"in_use": 0,
"limit": 10,
"reserved": 0
},
"volumes___DEFAULT__": {
"in_use": 0,
"limit": -1,
"reserved": 0
}
}
} quotas-update-request.json 0000664 0000000 0000000 00000000135 15131732575 0032473 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_sets {
"quota_set":{
"groups": 11,
"volumes": 5,
"backups": 4
}
}
quotas-update-response.json 0000664 0000000 0000000 00000000504 15131732575 0032641 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/quota_sets {
"quota_set": {
"backup_gigabytes": 1000,
"backups": 4,
"gigabytes": 1000,
"gigabytes___DEFAULT__": -1,
"groups": 11,
"per_volume_gigabytes": -1,
"snapshots": 10,
"snapshots___DEFAULT__": -1,
"volumes": 5,
"volumes___DEFAULT__": -1
}
} resource-filters-list-response.json 0000664 0000000 0000000 00000000661 15131732575 0032130 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"resource_filters": [
{
"filters": [
"name",
"status",
"image_metadata", "bootable",
"migration_status"
],
"resource": "volume"
},
{
"filters": [
"name",
"status",
"volume_id"
],
"resource": "snapshot"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-disable-log-request.json 0000664 0000000 0000000 00000000145 15131732575 0031573 0 ustar 00root root 0000000 0000000 {
"binary": "cinder-volume",
"host": "devstack@lvmdriver-1",
"disabled_reason": "test"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-disable-log-response.json0000664 0000000 0000000 00000000250 15131732575 0031736 0 ustar 00root root 0000000 0000000 {
"disabled": true,
"status": "disabled",
"host": "devstack@lvmdriver-1",
"service": "",
"binary": "cinder-volume",
"disabled_reason": "test"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-disable-request.json 0000664 0000000 0000000 00000000106 15131732575 0031011 0 ustar 00root root 0000000 0000000 {
"binary": "cinder-volume",
"host": "devstack@lvmdriver-1"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-disable-response.json 0000664 0000000 0000000 00000000211 15131732575 0031154 0 ustar 00root root 0000000 0000000 {
"disabled": true,
"status": "disabled",
"host": "devstack@lvmdriver-1",
"service": "",
"binary": "cinder-volume"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-enable-request.json 0000664 0000000 0000000 00000000106 15131732575 0030634 0 ustar 00root root 0000000 0000000 {
"binary": "cinder-volume",
"host": "devstack@lvmdriver-1"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-enable-response.json 0000664 0000000 0000000 00000000246 15131732575 0031007 0 ustar 00root root 0000000 0000000 {
"disabled": false,
"status": "enabled",
"host": "devstack@lvmdriver-1",
"service": "",
"binary": "cinder-volume",
"disabled_reason": null
}
services-failover-host-request.json 0000664 0000000 0000000 00000000077 15131732575 0032120 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"host": "devstack@lvmdriver-1",
"backend_id": null
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-freeze-request.json 0000664 0000000 0000000 00000000043 15131732575 0030666 0 ustar 00root root 0000000 0000000 {
"host": "devstack@rbd-sas"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-get-log-request.json 0000664 0000000 0000000 00000000147 15131732575 0030751 0 ustar 00root root 0000000 0000000 {
"binary": "cinder-volume",
"server": "devstack@lvmdriver-1",
"prefix": "cinder.volume"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-get-log-response.json 0000664 0000000 0000000 00000001071 15131732575 0031114 0 ustar 00root root 0000000 0000000 {
"log_levels": [{
"binary": "cinder-api",
"host": "devstack",
"levels": {
"cinder.volume.api": "DEBUG"
}
},
{
"binary": "cinder-scheduler",
"host": "devstack",
"levels": {
"cinder.volume.api": "DEBUG"
}
},
{
"binary": "cinder-backup",
"host": "devstack",
"levels": {}
},
{
"binary": "cinder-volume",
"host": "devstack@lvmdriver-1",
"levels": {
"cinder.volume.api": "DEBUG"
}
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-list-response.json 0000664 0000000 0000000 00000001543 15131732575 0030535 0 ustar 00root root 0000000 0000000 {
"services": [{
"status": "enabled",
"binary": "cinder-scheduler",
"zone": "nova",
"state": "up",
"updated_at": "2017-06-29T05:50:35.000000",
"host": "devstack",
"disabled_reason": null
},
{
"status": "enabled",
"binary": "cinder-backup",
"zone": "nova",
"state": "up",
"updated_at": "2017-06-29T05:50:42.000000",
"host": "devstack",
"disabled_reason": null
},
{
"status": "enabled",
"binary": "cinder-volume",
"zone": "nova",
"frozen": false,
"state": "up",
"updated_at": "2017-06-29T05:50:39.000000",
"cluster": null,
"host": "devstack@lvmdriver-1",
"replication_status": "disabled",
"active_backend_id": null,
"disabled_reason": null
}]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-set-log-request.json 0000664 0000000 0000000 00000000175 15131732575 0030766 0 ustar 00root root 0000000 0000000 {
"binary": "cinder-volume",
"server": "devstack@lvmdriver-1",
"prefix": "cinder.volume",
"level": "ERROR"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/services-thaw-request.json 0000664 0000000 0000000 00000000043 15131732575 0030351 0 ustar 00root root 0000000 0000000 {
"host": "devstack@rbd-sas"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/set-default-type-request.json 0000664 0000000 0000000 00000000104 15131732575 0030757 0 ustar 00root root 0000000 0000000 {
"default_type": {
"volume_type": "lvm_backend"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/set-default-type-response.json 0000664 0000000 0000000 00000000236 15131732575 0031133 0 ustar 00root root 0000000 0000000 {
"default_type": {
"project_id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"volume_type_id": "40ec6e5e-c9bd-4170-8740-c1cd42d7eabb"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshot-force-delete-request.json0000664 0000000 0000000 00000000036 15131732575 0031762 0 ustar 00root root 0000000 0000000 {
"os-force_delete": {}
}
snapshot-manage-list-detail-response.json 0000664 0000000 0000000 00000001602 15131732575 0033154 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"manageable-snapshots": [
{
"cinder_id": null,
"reason_not_safe": null,
"reference": {
"source-name": "lvol0"
},
"source_reference": {
"source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b"
},
"safe_to_manage": true,
"size": 1,
"extra_info": null
},
{
"cinder_id": "d0c84570-a01f-4579-9789-5e9f266587cd",
"reason_not_safe": "already managed",
"reference": {
"source-name":"_snapshot-d0c84570-a01f-4579-9789-5e9f266587cd"
},
"source_reference": {
"source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b"
},
"safe_to_manage": false,
"size": 1,
"extra_info": null
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshot-manage-list-response.json0000664 0000000 0000000 00000001214 15131732575 0031772 0 ustar 00root root 0000000 0000000 {
"manageable-snapshots": [
{
"source_reference": {
"source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b"
},
"safe_to_manage": true,
"reference": {
"source-name": "lvol0"
},
"size": 1
},
{
"source_reference": {
"source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b"
},
"safe_to_manage": false,
"reference": {
"source-name": "_snapshot-d0c84570-a01f-4579-9789-5e9f266587cd"
},
"size": 1
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshot-status-reset-request.json0000664 0000000 0000000 00000000101 15131732575 0032060 0 ustar 00root root 0000000 0000000 {
"os-reset_status": {
"status": "available"
}
}
snapshot-status-update-request.json 0000664 0000000 0000000 00000000145 15131732575 0032151 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-update_snapshot_status": {
"status": "creating",
"progress": "80%"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshot_manage_extensions/ 0000775 0000000 0000000 00000000000 15131732575 0030635 5 ustar 00root root 0000000 0000000 snapshot-manage-request.json 0000664 0000000 0000000 00000000343 15131732575 0036224 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshot_manage_extensions {
"snapshot": {
"description": null,
"metadata": null,
"ref": {
"source-name": "lvol0"
},
"name": null,
"volume_id": "1df34919-aba7-4a1b-a614-3b409d71ac03"
}
} snapshot-manage-response.json 0000664 0000000 0000000 00000001002 15131732575 0036363 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshot_manage_extensions {
"snapshot": {
"created_at": "2018-09-26T03:45:03.893592",
"description": "this is a new snapshot",
"id": "b6314a71-9d3d-439a-861d-b790def0d693",
"metadata": {
"manage-snap-meta1": "value1",
"manage-snap-meta2": "value2",
"manage-snap-meta3": "value3"
},
"name": "new_snapshot",
"size": 1,
"status": "creating",
"updated_at": "null",
"volume_id": "1df34919-aba7-4a1b-a614-3b409d71ac03"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/ 0000775 0000000 0000000 00000000000 15131732575 0025231 5 ustar 00root root 0000000 0000000 snapshot-create-request.json 0000664 0000000 0000000 00000000353 15131732575 0032634 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"snapshot": {
"name": "snap-001",
"description": "Daily backup",
"volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635",
"force": true,
"metadata": {
"key": "v3"
}
}
}
snapshot-create-response.json 0000664 0000000 0000000 00000000614 15131732575 0033002 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"snapshot": {
"created_at": "2019-03-11T16:24:34.469003",
"description": "Daily backup",
"id": "b36476e5-d18b-47f9-ac69-4818cb43ee21",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "d291b81c-6e40-4525-8231-90aa1588121e"
}
}
snapshot-metadata-create-request.json 0000664 0000000 0000000 00000000060 15131732575 0034405 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"metadata": {
"key": "v3"
}
}
snapshot-metadata-create-response.json 0000664 0000000 0000000 00000000062 15131732575 0034555 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"metadata": {
"key": "value"
}
} snapshot-metadata-show-key-response.json 0000664 0000000 0000000 00000000053 15131732575 0035060 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"meta": {
"key": "v3"
}
} snapshot-metadata-show-response.json 0000664 0000000 0000000 00000000057 15131732575 0034276 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"metadata": {
"key": "v3"
}
} snapshot-metadata-update-key-request.json 0000664 0000000 0000000 00000000052 15131732575 0035213 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"meta": {
"key": "new_value"
}
} snapshot-metadata-update-key-response.json 0000664 0000000 0000000 00000000062 15131732575 0035362 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"meta": {
"key": "new_value"
}
} snapshot-metadata-update-request.json 0000664 0000000 0000000 00000000072 15131732575 0034427 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"metadata": {
"new_key": "new_value"
}
} snapshot-metadata-update-response.json 0000664 0000000 0000000 00000000072 15131732575 0034575 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"metadata": {
"new_key": "new_value"
}
} snapshot-show-response.json 0000664 0000000 0000000 00000001044 15131732575 0032515 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"snapshot": {
"created_at": "2019-03-12T04:42:00.809352",
"description": "Daily backup",
"id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37"
}
}
snapshot-update-request.json 0000664 0000000 0000000 00000000157 15131732575 0032655 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"snapshot": {
"name": "snap-002",
"description": "This is yet, another snapshot."
}
} snapshot-update-response.json 0000664 0000000 0000000 00000000636 15131732575 0033025 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"snapshot": {
"created_at": "2019-03-12T04:53:53.426591",
"description": "This is yet, another snapshot.",
"id": "43666194-8e72-451a-b7bb-54fef763b2b8",
"metadata": {
"key": "v3"
},
"name": "snap-002",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "070c942d-9909-42e9-a467-7a781f150c58"
}
}
snapshots-list-detailed-response.json 0000664 0000000 0000000 00000001155 15131732575 0034447 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"snapshots": [
{
"created_at": "2019-03-11T16:24:36.464445",
"description": "Daily backup",
"id": "d0083dc5-8795-4c1a-bc9c-74f70006c205",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6"
}
]
}
snapshots-list-response.json 0000664 0000000 0000000 00000000714 15131732575 0032676 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots {
"snapshots": [
{
"created_at": "2019-03-11T16:29:08.973832",
"description": "Daily backup",
"id": "2c228773-50eb-422d-be7e-b5c6ced0c7a9",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "428ec041-b999-40d8-8a54-9e98b19406cc"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.14/ 0000775 0000000 0000000 00000000000 15131732575 0026004 5 ustar 00root root 0000000 0000000 snapshot-create-response.json 0000664 0000000 0000000 00000000657 15131732575 0033564 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.14 {
"snapshot": {
"created_at": "2019-03-11T16:24:34.469003",
"description": "Daily backup",
"id": "b36476e5-d18b-47f9-ac69-4818cb43ee21",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "d291b81c-6e40-4525-8231-90aa1588121e",
"group_snapshot_id": null
}
}
snapshot-show-response.json 0000664 0000000 0000000 00000001107 15131732575 0033270 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.14 {
"snapshot": {
"created_at": "2019-03-12T04:42:00.809352",
"description": "Daily backup",
"id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37",
"group_snapshot_id": null
}
}
snapshot-update-response.json 0000664 0000000 0000000 00000000701 15131732575 0033571 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.14 {
"snapshot": {
"created_at": "2019-03-12T04:53:53.426591",
"description": "This is yet, another snapshot.",
"id": "43666194-8e72-451a-b7bb-54fef763b2b8",
"metadata": {
"key": "v3"
},
"name": "snap-002",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "070c942d-9909-42e9-a467-7a781f150c58",
"group_snapshot_id": null
}
}
snapshots-list-detailed-response.json 0000664 0000000 0000000 00000001224 15131732575 0035217 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.14 {
"snapshots": [
{
"created_at": "2019-03-11T16:24:36.464445",
"description": "Daily backup",
"id": "d0083dc5-8795-4c1a-bc9c-74f70006c205",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6",
"group_snapshot_id": null
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.41/ 0000775 0000000 0000000 00000000000 15131732575 0026004 5 ustar 00root root 0000000 0000000 snapshot-create-response.json 0000664 0000000 0000000 00000000752 15131732575 0033560 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.41 {
"snapshot": {
"created_at": "2019-03-11T16:24:34.469003",
"description": "Daily backup",
"id": "b36476e5-d18b-47f9-ac69-4818cb43ee21",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "d291b81c-6e40-4525-8231-90aa1588121e",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e"
}
}
snapshot-show-response.json 0000664 0000000 0000000 00000001202 15131732575 0033264 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.41 {
"snapshot": {
"created_at": "2019-03-12T04:42:00.809352",
"description": "Daily backup",
"id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e"
}
}
snapshot-update-response.json 0000664 0000000 0000000 00000000774 15131732575 0033603 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.41 {
"snapshot": {
"created_at": "2019-03-12T04:53:53.426591",
"description": "This is yet, another snapshot.",
"id": "43666194-8e72-451a-b7bb-54fef763b2b8",
"metadata": {
"key": "v3"
},
"name": "snap-002",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "070c942d-9909-42e9-a467-7a781f150c58",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e"
}
}
snapshots-list-detailed-response.json 0000664 0000000 0000000 00000001323 15131732575 0035217 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.41 {
"snapshots": [
{
"created_at": "2019-03-11T16:24:36.464445",
"description": "Daily backup",
"id": "d0083dc5-8795-4c1a-bc9c-74f70006c205",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.65/ 0000775 0000000 0000000 00000000000 15131732575 0026012 5 ustar 00root root 0000000 0000000 snapshot-create-response.json 0000664 0000000 0000000 00000001012 15131732575 0033554 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.65 {
"snapshot": {
"created_at": "2019-03-11T16:24:34.469003",
"description": "Daily backup",
"id": "b36476e5-d18b-47f9-ac69-4818cb43ee21",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "d291b81c-6e40-4525-8231-90aa1588121e",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"consumes_quota": true
}
}
snapshot-show-response.json 0000664 0000000 0000000 00000001242 15131732575 0033276 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.65 {
"snapshot": {
"created_at": "2019-03-12T04:42:00.809352",
"description": "Daily backup",
"id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"consumes_quota": true
}
}
snapshot-update-response.json 0000664 0000000 0000000 00000001034 15131732575 0033577 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.65 {
"snapshot": {
"created_at": "2019-03-12T04:53:53.426591",
"description": "This is yet, another snapshot.",
"id": "43666194-8e72-451a-b7bb-54fef763b2b8",
"metadata": {
"key": "v3"
},
"name": "snap-002",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "070c942d-9909-42e9-a467-7a781f150c58",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"consumes_quota": true
}
}
snapshots-list-detailed-response.json 0000664 0000000 0000000 00000001367 15131732575 0035235 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/snapshots/v3.65 {
"snapshots": [
{
"created_at": "2019-03-11T16:24:36.464445",
"description": "Daily backup",
"id": "d0083dc5-8795-4c1a-bc9c-74f70006c205",
"metadata": {
"key": "v3"
},
"name": "snap-001",
"os-extended-snapshot-attributes:progress": "0%",
"os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"size": 10,
"status": "creating",
"updated_at": null,
"volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6",
"group_snapshot_id": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"consumes_quota": true
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/user-quotas-show-response.json 0000664 0000000 0000000 00000000643 15131732575 0031207 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"cores": 20,
"fixed_ips": -1,
"floating_ips": 10,
"id": "fake_project",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
"instances": 10,
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
"security_group_rules": 20,
"security_groups": 10
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/user-quotas-update-request.json 0000664 0000000 0000000 00000000113 15131732575 0031333 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"force": true,
"instances": 9
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/user-quotas-update-response.json 0000664 0000000 0000000 00000000604 15131732575 0031506 0 ustar 00root root 0000000 0000000 {
"quota_set": {
"cores": 20,
"floating_ips": 10,
"fixed_ips": -1,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
"instances": 9,
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
"security_group_rules": 20,
"security_groups": 10
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/versions/ 0000775 0000000 0000000 00000000000 15131732575 0025057 5 ustar 00root root 0000000 0000000 version-show-response.json 0000664 0000000 0000000 00000001400 15131732575 0032165 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/versions {
"versions": [
{
"id": "v3.0",
"links": [
{
"href": "https://docs.openstack.org/",
"rel": "describedby",
"type": "text/html"
},
{
"href": "http://127.0.0.1:44895/v3/",
"rel": "self"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=3"
}
],
"min_version": "3.0",
"status": "CURRENT",
"updated": "2023-08-31T00:00:00Z",
"version": "3.71"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/versions/versions-response.json 0000664 0000000 0000000 00000001400 15131732575 0031451 0 ustar 00root root 0000000 0000000 {
"versions": [
{
"id": "v3.0",
"links": [
{
"href": "https://docs.openstack.org/",
"rel": "describedby",
"type": "text/html"
},
{
"href": "http://127.0.0.1:45697/v3/",
"rel": "self"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.volume+json;version=3"
}
],
"min_version": "3.0",
"status": "CURRENT",
"updated": "2022-08-31T00:00:00Z",
"version": "3.71"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-attach-request.json 0000664 0000000 0000000 00000000177 15131732575 0030346 0 ustar 00root root 0000000 0000000 {
"os-attach": {
"instance_uuid": "95D9EF50-507D-11E5-B970-0800200C9A66",
"mountpoint": "/dev/vdc"
}
}
volume-begin-detaching-request.json 0000664 0000000 0000000 00000000041 15131732575 0032021 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-begin_detaching": {}
}
volume-bootable-status-update-request.json 0000664 0000000 0000000 00000000076 15131732575 0033411 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-set_bootable": {
"bootable": "True"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-detach-request.json 0000664 0000000 0000000 00000000136 15131732575 0030325 0 ustar 00root root 0000000 0000000 {
"os-detach": {
"attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-extend-request.json 0000664 0000000 0000000 00000000063 15131732575 0030363 0 ustar 00root root 0000000 0000000 {
"os-extend": {
"new_size": 3
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-force-delete-request.json 0000664 0000000 0000000 00000000036 15131732575 0031432 0 ustar 00root root 0000000 0000000 {
"os-force_delete": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-force-detach-request.json 0000664 0000000 0000000 00000000270 15131732575 0031420 0 ustar 00root root 0000000 0000000 {
"os-force_detach": {
"attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1",
"connector": {
"initiator": "iqn.2012-07.org.fake:01"
}
}
}
volume-image-metadata-set-request.json 0000664 0000000 0000000 00000000422 15131732575 0032445 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-set_image_metadata": {
"metadata": {
"image_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
"image_name": "image",
"kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
"ramdisk_id": "somedisk"
}
}
}
volume-image-metadata-unset-request.json 0000664 0000000 0000000 00000000107 15131732575 0033010 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-unset_image_metadata": {
"key": "ramdisk_id"
}
}
volume-image-metadata-update-response.json 0000664 0000000 0000000 00000000636 15131732575 0033311 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"metadata": {
"kernel_id": "6ff710d2-942b-4d6b-9168-8c9cc2404ab1",
"container_format": "bare",
"min_ram": "0",
"ramdisk_id": "somedisk",
"disk_format": "qcow2",
"image_name": "image",
"image_id": "5137a025-3c5f-43c1-bc64-5f41270040a5",
"checksum": "f8ab98ff5e73ebab884d80c9dc9c7290",
"min_disk": "0",
"size": "13267968"
}
}
volume-initialize-connection-request.json 0000664 0000000 0000000 00000000525 15131732575 0033316 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-initialize_connection": {
"connector": {
"platform":"x86_64",
"host": "node2",
"do_local_attach": false,
"ip": "192.168.13.101",
"os_type": "linux2",
"multipath": false,
"initiator": "iqn.1994-05.com.redhat:d16cbb5d31e5"
}
}
}
volume-manage-list-detail-response.json 0000664 0000000 0000000 00000001202 15131732575 0032620 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"manageable-volumes": [
{
"cinder_id": "9ba5bb53-4a18-4b38-be06-992999da338d",
"reason_not_safe": "already managed",
"reference": {
"source-name": "volume-9ba5bb53-4a18-4b38-be06-992999da338d"
},
"safe_to_manage": false,
"size": 1,
"extra_info": null
},
{
"cinder_id": null,
"reason_not_safe": null,
"reference": {
"source-name": "lvol0"
},
"safe_to_manage": true,
"size": 1,
"extra_info": null
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-manage-list-response.json 0000664 0000000 0000000 00000000613 15131732575 0031444 0 ustar 00root root 0000000 0000000 {
"manageable-volumes": [
{
"safe_to_manage": false,
"reference": {
"source-name": "volume-3a81fdac-e8ae-4e61-b6a2-2e14ff316f19"
},
"size": 1
},
{
"safe_to_manage": true,
"reference": {
"source-name": "lvol0"
},
"size": 1
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-manage-request-cluster.json0000664 0000000 0000000 00000000711 15131732575 0032003 0 ustar 00root root 0000000 0000000 {
"volume": {
"host": null,
"cluster": "cluster@backend",
"ref": {
"source-name": "existingLV",
"source-id": "1234"
},
"name": "New Volume",
"availability_zone": "az2",
"description": "Volume imported from existingLV",
"volume_type": null,
"bootable": true,
"metadata": {
"key1": "value1",
"key2": "value2"
}
}
}
volume-os-extend_volume_completion-request.json 0000664 0000000 0000000 00000000106 15131732575 0034541 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-extend_volume_completion": {
"error": false
}
}
volume-os-migrate_volume-request.json 0000664 0000000 0000000 00000000100 15131732575 0032443 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-migrate_volume": {
"host": "node1@lvm"
}
} volume-os-migrate_volume_completion-request.json 0000664 0000000 0000000 00000000205 15131732575 0034702 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-migrate_volume_completion": {
"new_volume": "2b955850-f177-45f7-9f49-ecb2c256d161",
"error": false
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-os-reimage-request.json 0000664 0000000 0000000 00000000174 15131732575 0031127 0 ustar 00root root 0000000 0000000 {
"os-reimage": {
"image_id": "71543ced-a8af-45b6-a5c4-a46282108a90",
"reimage_reserved": false
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-os-retype-request.json 0000664 0000000 0000000 00000000156 15131732575 0031026 0 ustar 00root root 0000000 0000000 {
"os-retype": {
"new_type": "dedup-tier-replicaton",
"migration_policy": "never"
}
}
volume-readonly-update-request.json 0000664 0000000 0000000 00000000104 15131732575 0032106 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-update_readonly_flag": {
"readonly": true
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-reserve-request.json 0000664 0000000 0000000 00000000027 15131732575 0030547 0 ustar 00root root 0000000 0000000 {
"os-reserve": {}
}
volume-revert-to-snapshot-request.json 0000664 0000000 0000000 00000000127 15131732575 0032602 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"revert": {
"snapshot_id": "5aa119a8-d25b-45a7-8d1b-88e127885635"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-roll-detaching-request.json0000664 0000000 0000000 00000000037 15131732575 0031771 0 ustar 00root root 0000000 0000000 {
"os-roll_detaching": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-status-reset-request.json 0000664 0000000 0000000 00000000217 15131732575 0031540 0 ustar 00root root 0000000 0000000 {
"os-reset_status": {
"status": "available",
"attach_status": "detached",
"migration_status": "migrating"
}
}
volume-terminate-connection-request.json 0000664 0000000 0000000 00000000525 15131732575 0033145 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"os-terminate_connection": {
"connector": {
"platform": "x86_64",
"host": "node2",
"do_local_attach": false,
"ip": "192.168.13.101",
"os_type": "linux2",
"multipath": false,
"initiator": "iqn.1994-05.com.redhat:d16cbb5d31e5"
}
}
}
volume-type-access-delete-request.json 0000664 0000000 0000000 00000000135 15131732575 0032475 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples {
"removeProjectAccess": {
"project": "f270b245cb11498ca4031deb7e141cfa"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-unmanage-request.json 0000664 0000000 0000000 00000000032 15131732575 0030663 0 ustar 00root root 0000000 0000000 {
"os-unmanage": {}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume-unreserve-request.json 0000664 0000000 0000000 00000000032 15131732575 0031106 0 ustar 00root root 0000000 0000000 {
"os-unreserve":{}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_actions/ 0000775 0000000 0000000 00000000000 15131732575 0026236 5 ustar 00root root 0000000 0000000 volume-upload-to-image-request.json 0000664 0000000 0000000 00000000334 15131732575 0035031 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_actions {
"os-volume_upload_image":{
"image_name": "test",
"force": false,
"disk_format": "raw",
"container_format": "bare",
"visibility": "private",
"protected": false
}
} volume-upload-to-image-response.json 0000664 0000000 0000000 00000000737 15131732575 0035206 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_actions {
"os-volume_upload_image": {
"container_format": "bare",
"disk_format": "raw",
"display_description": null,
"id": "3a81fdac-e8ae-4e61-b6a2-2e14ff316f19",
"image_id": "de75b74e-7f0d-4b59-a263-bd87bfc313bd",
"image_name": "test",
"protected": false,
"size": 1,
"status": "uploading",
"updated_at": "2017-06-05T08:44:28.000000",
"visibility": "private",
"volume_type": null
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_manage_extensions/ 0000775 0000000 0000000 00000000000 15131732575 0030305 5 ustar 00root root 0000000 0000000 volume-manage-request.json 0000664 0000000 0000000 00000000662 15131732575 0035350 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_manage_extensions {
"volume": {
"host": "geraint-VirtualBox",
"ref": {
"source-name": "existingLV",
"source-id": "1234"
},
"name": "New Volume",
"availability_zone": "az2",
"description": "Volume imported from existingLV",
"volume_type": null,
"bootable": true,
"metadata": {
"key1": "value1",
"key2": "value2"
}
}
} volume-manage-response.json 0000664 0000000 0000000 00000002132 15131732575 0035510 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_manage_extensions {
"volume": {
"attachments": [],
"availability_zone": "az2",
"bootable": "false",
"created_at": "2014-07-18T00:12:54.000000",
"description": "Volume imported from existingLV",
"encrypted": "false",
"id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c",
"links": [
{
"href": "http://10.0.2.15:8776/v3/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c",
"rel": "self"
},
{
"href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c",
"rel": "bookmark"
}
],
"metadata": {
"key1": "value1",
"key2": "value2"
},
"name": "New Volume",
"os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb",
"size": 0,
"snapshot_id": "null",
"source_volid": "null",
"status": "creating",
"user_id": "eae1472b5fc5496998a3d06550929e7e",
"volume_type": "null"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer/ 0000775 0000000 0000000 00000000000 15131732575 0026422 5 ustar 00root root 0000000 0000000 volume-transfer-accept-request.json 0000664 0000000 0000000 00000000101 15131732575 0035302 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer {
"accept": {
"auth_key": "9266c59563c84664"
}
}
volume-transfer-accept-response.json 0000664 0000000 0000000 00000001145 15131732575 0035461 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer {
"transfer": {
"id": "0a840aa1-8f8f-4042-86d7-09d8ca755272",
"links": [
{
"href": "http://127.0.0.1:46057/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/0a840aa1-8f8f-4042-86d7-09d8ca755272",
"rel": "self"
},
{
"href": "http://127.0.0.1:46057/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/0a840aa1-8f8f-4042-86d7-09d8ca755272",
"rel": "bookmark"
}
],
"name": "first volume",
"volume_id": "e56dee53-e565-40f4-9c6b-b983f74a2aa5"
}
} volume-transfer-create-request.json 0000664 0000000 0000000 00000000170 15131732575 0035314 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer {
"transfer": {
"volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37",
"name": "first volume"
}
}
volume-transfer-create-response.json 0000664 0000000 0000000 00000001301 15131732575 0035457 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer {
"transfer": {
"auth_key": "dbccabcdbad19e07",
"created_at": "2019-03-20T09:29:46.743632",
"id": "3d26db0c-69cd-42e4-ae42-7552759ab361",
"links": [
{
"href": "http://127.0.0.1:40345/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d26db0c-69cd-42e4-ae42-7552759ab361",
"rel": "self"
},
{
"href": "http://127.0.0.1:40345/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d26db0c-69cd-42e4-ae42-7552759ab361",
"rel": "bookmark"
}
],
"name": "first volume",
"volume_id": "59fe2097-931b-4ceb-b74b-f862ff3b6277"
}
} volume-transfer-show-response.json 0000664 0000000 0000000 00000001231 15131732575 0035176 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer {
"transfer": {
"created_at": "2019-03-20T09:29:48.732953",
"id": "5055b9c2-527b-47ef-bdd6-62e1130f511f",
"links": [
{
"href": "http://127.0.0.1:41845/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/5055b9c2-527b-47ef-bdd6-62e1130f511f",
"rel": "self"
},
{
"href": "http://127.0.0.1:41845/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/5055b9c2-527b-47ef-bdd6-62e1130f511f",
"rel": "bookmark"
}
],
"name": "first volume",
"volume_id": "8cdd62be-4bea-4b7c-bb53-c0b5424ee2af"
}
} volume-transfers-list-detailed-response.json 0000664 0000000 0000000 00000001346 15131732575 0037134 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer {
"transfers": [
{
"created_at": "2019-03-20T09:29:52.758407",
"id": "1b3f7d49-8fd8-41b8-b2a5-859c5fe71a20",
"links": [
{
"href": "http://127.0.0.1:37479/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/1b3f7d49-8fd8-41b8-b2a5-859c5fe71a20",
"rel": "self"
},
{
"href": "http://127.0.0.1:37479/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/1b3f7d49-8fd8-41b8-b2a5-859c5fe71a20",
"rel": "bookmark"
}
],
"name": "first volume",
"volume_id": "acb5a860-3f17-4c35-9484-394a12dd7dfc"
}
]
} volume-transfers-list-response.json 0000664 0000000 0000000 00000001256 15131732575 0035363 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfer {
"transfers": [
{
"id": "a0f13fb9-904c-41c8-8c2e-495cac61a78f",
"links": [
{
"href": "http://127.0.0.1:45017/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/a0f13fb9-904c-41c8-8c2e-495cac61a78f",
"rel": "self"
},
{
"href": "http://127.0.0.1:45017/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/a0f13fb9-904c-41c8-8c2e-495cac61a78f",
"rel": "bookmark"
}
],
"name": "first volume",
"volume_id": "e72d7454-0234-4e3e-99e9-560d1ff79a71"
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/ 0000775 0000000 0000000 00000000000 15131732575 0026605 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.55/ 0000775 0000000 0000000 00000000000 15131732575 0027365 5 ustar 00root root 0000000 0000000 volume-transfers-create-request.json 0000664 0000000 0000000 00000000170 15131732575 0036442 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.55 {
"transfer": {
"volume_id": "1bb4acc9-9fa4-4b4d-8992-3259b69c8372",
"name": "first volume"
}
}
volume-transfers-create-response.json 0000664 0000000 0000000 00000001340 15131732575 0036610 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.55 {
"transfer": {
"auth_key": "19244092a5352ebb",
"created_at": "2023-06-12T21:21:38.394873",
"id": "33907fea-976f-4d67-8867-b5382f84eb8c",
"links": [
{
"href": "http://127.0.0.1:45183/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/33907fea-976f-4d67-8867-b5382f84eb8c",
"rel": "self"
},
{
"href": "http://127.0.0.1:45183/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/33907fea-976f-4d67-8867-b5382f84eb8c",
"rel": "bookmark"
}
],
"name": "first volume",
"no_snapshots": false,
"volume_id": "31024287-e368-4b2c-85a4-880b3b6fc8b0"
}
} volume-transfers-show-response.json 0000664 0000000 0000000 00000001270 15131732575 0036327 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.55 {
"transfer": {
"created_at": "2023-06-22T08:28:14.618343",
"id": "16b47e50-ab70-4781-bc01-cdcc01ca264a",
"links": [
{
"href": "http://127.0.0.1:38399/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/16b47e50-ab70-4781-bc01-cdcc01ca264a",
"rel": "self"
},
{
"href": "http://127.0.0.1:38399/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/16b47e50-ab70-4781-bc01-cdcc01ca264a",
"rel": "bookmark"
}
],
"name": "first volume",
"no_snapshots": false,
"volume_id": "a67e4027-4a83-4b80-a2d5-5b49650ac28c"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.57/ 0000775 0000000 0000000 00000000000 15131732575 0027367 5 ustar 00root root 0000000 0000000 volume-transfers-create-request.json 0000664 0000000 0000000 00000000227 15131732575 0036447 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.57 {
"transfer": {
"volume_id": "80d68197-b67e-4c8e-bbb9-030b2581f921",
"name": "first volume",
"no_snapshots": false
}
}
volume-transfers-create-response.json 0000664 0000000 0000000 00000001550 15131732575 0036615 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.57 {
"transfer": {
"accepted": false,
"auth_key": "e2cb02466324813c",
"created_at": "2023-06-12T21:21:38.392033",
"destination_project_id": null,
"id": "94bae1a0-83fb-496c-9cd2-800d8237ab0d",
"links": [
{
"href": "http://127.0.0.1:45193/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/94bae1a0-83fb-496c-9cd2-800d8237ab0d",
"rel": "self"
},
{
"href": "http://127.0.0.1:45193/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/94bae1a0-83fb-496c-9cd2-800d8237ab0d",
"rel": "bookmark"
}
],
"name": "first volume",
"no_snapshots": false,
"source_project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"volume_id": "202eead8-3c82-41e1-914f-83638a063be9"
}
} volume-transfers-show-response.json 0000664 0000000 0000000 00000001500 15131732575 0036325 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers/v3.57 {
"transfer": {
"accepted": false,
"created_at": "2023-06-22T08:28:17.647081",
"destination_project_id": null,
"id": "3d79fbda-8d9c-4da3-a016-e5612fcb7f65",
"links": [
{
"href": "http://127.0.0.1:34593/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d79fbda-8d9c-4da3-a016-e5612fcb7f65",
"rel": "self"
},
{
"href": "http://127.0.0.1:34593/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d79fbda-8d9c-4da3-a016-e5612fcb7f65",
"rel": "bookmark"
}
],
"name": "first volume",
"no_snapshots": false,
"source_project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"volume_id": "7e31e409-2a7a-4ea6-aa0b-bc7be056fc57"
}
} volume-transfers-accept-request.json 0000664 0000000 0000000 00000000100 15131732575 0035647 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers {
"accept": {
"auth_key": "f318375a4400391e"
}
} volume-transfers-accept-response.json 0000664 0000000 0000000 00000001145 15131732575 0036027 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_transfers {
"transfer": {
"id": "9e395d6d-5138-423c-a63c-7b62c6265fa1",
"links": [
{
"href": "http://127.0.0.1:39369/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/9e395d6d-5138-423c-a63c-7b62c6265fa1",
"rel": "self"
},
{
"href": "http://127.0.0.1:39369/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/9e395d6d-5138-423c-a63c-7b62c6265fa1",
"rel": "bookmark"
}
],
"name": "first volume",
"volume_id": "8d19f929-f1da-4a76-acad-9ed17da0981e"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type/ 0000775 0000000 0000000 00000000000 15131732575 0025557 5 ustar 00root root 0000000 0000000 encryption-type-create-request.json 0000664 0000000 0000000 00000000236 15131732575 0034474 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"encryption":{
"key_size": 256,
"provider": "luks",
"control_location":"front-end",
"cipher": "aes-xts-plain64"
}
}
encryption-type-create-response.json 0000664 0000000 0000000 00000000443 15131732575 0034642 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"encryption": {
"volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577",
"control_location": "front-end",
"encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74",
"key_size": 256,
"provider": "luks",
"cipher": "aes-xts-plain64"
}
}
encryption-type-show-response.json 0000664 0000000 0000000 00000000547 15131732575 0034364 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577",
"control_location": "front-end",
"deleted": false,
"created_at": "2016-12-28T02:32:25.000000",
"updated_at": null,
"encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74",
"key_size": 256,
"provider": "luks",
"deleted_at": null,
"cipher": "aes-xts-plain64"
}
encryption-type-specific-specs-show-response.json 0000664 0000000 0000000 00000000044 15131732575 0037252 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"cipher": "aes-xts-plain64"
}
encryption-type-update-request.json 0000664 0000000 0000000 00000000234 15131732575 0034511 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"encryption":{
"key_size": 64,
"provider": "luks",
"control_location":"back-end",
"cipher": "aes-xts-plain64"
}
}
encryption-type-update-response.json 0000664 0000000 0000000 00000000234 15131732575 0034657 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"encryption":{
"key_size": 64,
"provider": "luks",
"control_location":"back-end",
"cipher": "aes-xts-plain64"
}
}
volume-type-access-add-request.json 0000664 0000000 0000000 00000000131 15131732575 0034327 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"addProjectAccess": {
"project": "6f70656e737461636b20342065766572"
}
} volume-type-access-list-response.json 0000664 0000000 0000000 00000000274 15131732575 0034730 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type_access": [
{
"project_id": "6f70656e737461636b20342065766572",
"volume_type_id": "a5082c24-2a27-43a4-b48e-fcec1240e36b"
}
]
} volume-type-all-extra-specs-show-response.json 0000664 0000000 0000000 00000000075 15131732575 0036477 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"extra_specs": {
"capabilities": "gpu"
}
}
volume-type-create-request.json 0000664 0000000 0000000 00000000340 15131732575 0033605 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type": {
"name": "vol-type-001",
"description": "volume type 0001",
"os-volume-type-access:is_public": true,
"extra_specs": {
"capabilities": "gpu"
}
}
}
volume-type-create-response.json 0000664 0000000 0000000 00000000461 15131732575 0033757 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type": {
"name": "vol-type-001",
"extra_specs": {
"capabilities": "gpu"
},
"os-volume-type-access:is_public": true,
"is_public": true,
"id": "6d0ff92a-0007-4780-9ece-acfe5876966a",
"description": "volume type 0001"
}
}
volume-type-default-response.json 0000664 0000000 0000000 00000000436 15131732575 0034142 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type": {
"id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"qos_specs_id": null,
"name": "vol-type-001",
"description": "volume type 0001",
"is_public": true,
"extra_specs": {
"capabilities": "gpu"
}
}
}
volume-type-extra-specs-create-update-request.json 0000664 0000000 0000000 00000000122 15131732575 0037317 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"extra_specs": {
"key1": "value1",
"key2": "value2"
}
}
volume-type-extra-specs-create-update-response.json 0000664 0000000 0000000 00000000122 15131732575 0037465 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"extra_specs": {
"key1": "value1",
"key2": "value2"
}
}
volume-type-show-response.json 0000664 0000000 0000000 00000000517 15131732575 0033476 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type": {
"id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"qos_specs_id": null,
"name": "vol-type-001",
"description": "volume type 0001",
"os-volume-type-access:is_public": true,
"is_public": true,
"extra_specs": {
"capabilities": "gpu"
}
}
}
volume-type-specific-extra-specs-show-response.json 0000664 0000000 0000000 00000000036 15131732575 0037511 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"capabilities": "gpu"
}
volume-type-specific-extra-specs-update-request.json 0000664 0000000 0000000 00000000031 15131732575 0037640 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"key1": "value1"
}
volume-type-specific-extra-specs-update-response.json 0000664 0000000 0000000 00000000031 15131732575 0040006 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"key1": "value1"
}
volume-type-update-request.json 0000664 0000000 0000000 00000000204 15131732575 0033623 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type": {
"name": "vol-type-001",
"description": "volume type 0001",
"is_public": true
}
}
volume-type-update-response.json 0000664 0000000 0000000 00000000400 15131732575 0033767 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_type": {
"id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
"name": "vol-type-001",
"description": "volume type 0001",
"is_public": true,
"extra_specs": {
"capabilities": "gpu"
}
}
}
volume-types-list-response.json 0000664 0000000 0000000 00000002050 15131732575 0033646 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volume_type {
"volume_types": [
{
"description": "volume type 0002",
"extra_specs": {
"capabilities": "gpu"
},
"id": "ef512777-6552-4013-82f0-57a96e5804b7",
"is_public": true,
"name": "vol-type-002",
"os-volume-type-access:is_public": true,
"qos_specs_id": null
},
{
"description": "volume type 0001",
"extra_specs": {
"capabilities": "gpu"
},
"id": "18947ff2-ad57-42b2-9350-34262e530203",
"is_public": true,
"name": "vol-type-001",
"os-volume-type-access:is_public": true,
"qos_specs_id": null
},
{
"description": "Default Volume Type",
"extra_specs": {},
"id": "7a56b996-b73f-4233-9f00-dd6a68b49b27",
"is_public": true,
"name": "__DEFAULT__",
"os-volume-type-access:is_public": true,
"qos_specs_id": null
}
]
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/ 0000775 0000000 0000000 00000000000 15131732575 0024701 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.13/ 0000775 0000000 0000000 00000000000 15131732575 0025453 5 ustar 00root root 0000000 0000000 volume-create-response.json 0000664 0000000 0000000 00000002166 15131732575 0032700 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.13 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null
}
}
volume-show-response.json 0000664 0000000 0000000 00000002515 15131732575 0032413 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.13 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null
}
}
volume-update-response.json 0000664 0000000 0000000 00000002276 15131732575 0032721 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.13 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000002756 15131732575 0034351 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.13 {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.21/ 0000775 0000000 0000000 00000000000 15131732575 0025452 5 ustar 00root root 0000000 0000000 volume-create-response.json 0000664 0000000 0000000 00000002223 15131732575 0032671 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.21 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null
}
}
volume-show-response.json 0000664 0000000 0000000 00000002552 15131732575 0032413 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.21 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null
}
}
volume-update-response.json 0000664 0000000 0000000 00000002333 15131732575 0032712 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.21 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000003017 15131732575 0034337 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.21 {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.48/ 0000775 0000000 0000000 00000000000 15131732575 0025463 5 ustar 00root root 0000000 0000000 volume-create-response.json 0000664 0000000 0000000 00000002321 15131732575 0032701 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.48 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true
}
}
volume-show-response.json 0000664 0000000 0000000 00000002650 15131732575 0032423 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.48 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true
}
}
volume-update-response.json 0000664 0000000 0000000 00000002431 15131732575 0032722 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.48 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000003125 15131732575 0034350 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.48 {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.61/ 0000775 0000000 0000000 00000000000 15131732575 0025456 5 ustar 00root root 0000000 0000000 volume-create-response.json 0000664 0000000 0000000 00000002357 15131732575 0032705 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.61 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null
}
}
volume-show-response.json 0000664 0000000 0000000 00000002706 15131732575 0032420 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.61 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null
}
}
volume-update-response.json 0000664 0000000 0000000 00000002467 15131732575 0032726 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.61 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000003167 15131732575 0034351 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.61 {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.63/ 0000775 0000000 0000000 00000000000 15131732575 0025460 5 ustar 00root root 0000000 0000000 volume-create-response.json 0000664 0000000 0000000 00000002461 15131732575 0032703 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.63 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d"
}
}
volume-show-response.json 0000664 0000000 0000000 00000003010 15131732575 0032407 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.63 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d"
}
}
volume-update-response.json 0000664 0000000 0000000 00000002571 15131732575 0032724 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.63 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d"
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000003275 15131732575 0034353 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.63 {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.65/ 0000775 0000000 0000000 00000000000 15131732575 0025462 5 ustar 00root root 0000000 0000000 volume-create-response.json 0000664 0000000 0000000 00000002521 15131732575 0032702 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.65 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"consumes_quota": true
}
}
volume-show-response.json 0000664 0000000 0000000 00000003050 15131732575 0032415 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.65 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"consumes_quota": true
}
}
volume-update-response.json 0000664 0000000 0000000 00000002631 15131732575 0032723 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.65 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"consumes_quota": true
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000003341 15131732575 0034347 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.65 {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": true,
"cluster_name": null,
"consumes_quota": true
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.69/ 0000775 0000000 0000000 00000000000 15131732575 0025466 5 ustar 00root root 0000000 0000000 volume-create-response.json 0000664 0000000 0000000 00000002521 15131732575 0032706 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.69 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": null,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"consumes_quota": true
}
}
volume-show-response.json 0000664 0000000 0000000 00000003050 15131732575 0032421 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.69 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": null,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"consumes_quota": true
}
}
volume-update-response.json 0000664 0000000 0000000 00000002631 15131732575 0032727 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.69 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"group_id": null,
"provider_id": null,
"service_uuid": null,
"shared_targets": null,
"cluster_name": null,
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"consumes_quota": true
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000003341 15131732575 0034353 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/v3.69 {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__",
"volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d",
"provider_id": null,
"group_id": null,
"service_uuid": null,
"shared_targets": null,
"cluster_name": null,
"consumes_quota": true
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/volume-create-request.json0000664 0000000 0000000 00000001035 15131732575 0032031 0 ustar 00root root 0000000 0000000 {
"volume": {
"size": 10,
"availability_zone": null,
"source_volid": null,
"description": null,
"multiattach": false,
"snapshot_id": null,
"backup_id": null,
"name": null,
"imageRef": null,
"volume_type": null,
"metadata": {},
"consistencygroup_id": null
},
"OS-SCH-HNT:scheduler_hints": {
"same_host": [
"a0cf03a5-d921-4877-bb5c-86d26cf818e1",
"8c19174f-4220-44f0-824a-cd1eeef10287"
]
}
} volume-create-response.json 0000664 0000000 0000000 00000002134 15131732575 0032121 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:21:12.715987",
"description": null,
"encrypted": false,
"id": "2b955850-f177-45f7-9f49-ecb2c256d161",
"links": [
{
"href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "self"
},
{
"href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__"
}
}
volume-metadata-create-request.json 0000664 0000000 0000000 00000000067 15131732575 0033534 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"metadata": {
"name": "metadata0"
}
} volume-metadata-create-response.json 0000664 0000000 0000000 00000000067 15131732575 0033702 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"metadata": {
"name": "metadata0"
}
} volume-metadata-show-key-response.json 0000664 0000000 0000000 00000000063 15131732575 0034201 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"meta": {
"name": "metadata1"
}
} volume-metadata-show-response.json 0000664 0000000 0000000 00000000026 15131732575 0033412 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"metadata": {}
} volume-metadata-update-key-request.json 0000664 0000000 0000000 00000000062 15131732575 0034334 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"meta": {
"name": "new_name"
}
} volume-metadata-update-key-response.json 0000664 0000000 0000000 00000000062 15131732575 0034502 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"meta": {
"name": "new_name"
}
} volume-metadata-update-request.json 0000664 0000000 0000000 00000000067 15131732575 0033553 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"metadata": {
"name": "metadata1"
}
} volume-metadata-update-response.json 0000664 0000000 0000000 00000000067 15131732575 0033721 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"metadata": {
"name": "metadata1"
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/volume-show-response.json 0000664 0000000 0000000 00000002463 15131732575 0031722 0 ustar 00root root 0000000 0000000 {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:50:07.770785",
"description": null,
"encrypted": false,
"id": "f7223234-1afc-4d19-bfa3-d19deb6235ef",
"links": [
{
"href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "self"
},
{
"href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__"
}
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/volume-update-request.json0000664 0000000 0000000 00000000253 15131732575 0032051 0 ustar 00root root 0000000 0000000 {
"volume": {
"name": "vol-003",
"description": "This is yet, another volume.",
"metadata": {
"name": "metadata0"
}
}
} volume-update-response.json 0000664 0000000 0000000 00000002244 15131732575 0032142 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"volume": {
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-29T06:59:23.679903",
"description": "This is yet, another volume.",
"encrypted": false,
"id": "8b2459d1-0059-4e14-a89f-dfa73a452af6",
"links": [
{
"href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "self"
},
{
"href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6",
"rel": "bookmark"
}
],
"metadata": {
"name": "metadata0"
},
"migration_status": null,
"multiattach": false,
"name": "vol-003",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__"
}
}
volumes-list-detailed-response.json 0000664 0000000 0000000 00000002720 15131732575 0033566 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"volumes": [
{
"attachments": [],
"availability_zone": "nova",
"bootable": "false",
"consistencygroup_id": null,
"created_at": "2018-11-28T06:25:15.288987",
"description": null,
"encrypted": false,
"id": "cb49b381-9012-40cb-b8ee-80c19a4801b5",
"links": [
{
"href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "self"
},
{
"href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5",
"rel": "bookmark"
}
],
"metadata": {},
"migration_status": null,
"multiattach": false,
"name": null,
"os-vol-host-attr:host": null,
"os-vol-mig-status-attr:migstat": null,
"os-vol-mig-status-attr:name_id": null,
"os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3",
"replication_status": null,
"size": 10,
"snapshot_id": null,
"source_volid": null,
"status": "creating",
"updated_at": null,
"user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e",
"volume_type": "__DEFAULT__"
}
]
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes/volumes-list-response.json0000664 0000000 0000000 00000001113 15131732575 0032067 0 ustar 00root root 0000000 0000000 {
"volumes": [
{
"id": "efa54464-8fab-47cd-a05a-be3e6b396188",
"links": [
{
"href": "http://127.0.0.1:37097/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/efa54464-8fab-47cd-a05a-be3e6b396188",
"rel": "self"
},
{
"href": "http://127.0.0.1:37097/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/efa54464-8fab-47cd-a05a-be3e6b396188",
"rel": "bookmark"
}
],
"name": null
}
]
} volumes-list-summary-response.json 0000664 0000000 0000000 00000000275 15131732575 0033513 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/volumes {
"volume-summary": {
"total_size": 4,
"total_count": 4,
"metadata": {
"key1": ["value1", "value2"],
"key2": ["value2"]
}
}
} cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/worker-cleanup-request.json 0000664 0000000 0000000 00000000365 15131732575 0030532 0 ustar 00root root 0000000 0000000 {
"cluster_name": "test",
"disabled": true,
"host": "host1@lvmdriver",
"service_id": 1,
"is_up": true,
"binary": "cinder-volume",
"resource_id": "b122f668-d15a-40f8-af21-38d218796ab8",
"resource_type": "Volume"
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/samples/worker-cleanup-response.json 0000664 0000000 0000000 00000000315 15131732575 0030673 0 ustar 00root root 0000000 0000000 {
"cleaning": [
{
"id": 1,
"host": "host1@lvmdriver",
"binary": "cinder-volume",
"cluster_name": "test"
}
],
"unavailable": []
}
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/snapshot-manage.inc 0000664 0000000 0000000 00000007230 15131732575 0025325 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Snapshot manage extension (manageable_snapshots)
================================================
Creates or lists snapshots by using existing storage instead of allocating new
storage.
Manage an existing snapshot
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/manageable_snapshots
Creates a snapshot by using existing storage rather than allocating new
storage.
The caller must specify a reference to an existing storage volume
in the ref parameter in the request. Although each storage driver
might interpret this reference differently, the driver should
accept a reference structure that contains either a source-id
or source-name element, if possible.
The API chooses the size of the snapshot by rounding up the size of
the existing snapshot to the next gibibyte (GiB).
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot: snapshot_obj
- description: description_snap
- metadata: metadata_snap
- name: name_snap
- ref: ref
- volume_id: volume_id
Request Example
---------------
.. literalinclude:: ./samples/snapshot_manage_extensions/snapshot-manage-request.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- snapshot: snapshot_obj
- status: status_snap
- size: size
- metadata: metadata_snap
- name: name_snap
- volume_id: volume_id
- created_at: created_at
- description: description_snap_req
- id: id
- updated_at: updated_at
Response Example
----------------
.. literalinclude:: ./samples/snapshot_manage_extensions/snapshot-manage-response.json
:language: javascript
List summary of snapshots available to manage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/manageable_snapshots
Search a volume backend and list summary of snapshots which are available to
manage.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- offset: offset
- limit: limit
- marker: marker
- host: host_query
Response
--------
.. rest_parameters:: parameters.yaml
- manageable-snapshots: manageable-snapshots
- source_reference: source_reference
- safe_to_manage: safe_to_manage
- reference: reference
- source-name: source-name
- size: size
Response Example
----------------
.. literalinclude:: ./samples/snapshot-manage-list-response.json
:language: javascript
List detail of snapshots available to manage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/manageable_snapshots/detail
Search a volume backend and list detail of snapshots which are available to
manage.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- offset: offset
- limit: limit
- marker: marker
- host: host_query
Response
--------
.. rest_parameters:: parameters.yaml
- manageable-snapshots: manageable-snapshots
- cinder_id: cinder_id
- source_reference: source_reference
- safe_to_manage: safe_to_manage
- reason_not_safe: reason_not_safe
- reference: reference
- source-name: source-name
- size: size
- extra_info: extra_info
Response Example
----------------
.. literalinclude:: ./samples/snapshot-manage-list-detail-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/valid-boolean-values.inc 0000664 0000000 0000000 00000000642 15131732575 0026251 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
.. _valid-boolean-values:
Valid boolean values
====================
Following is the list of valid values for boolean parameters.
[True, ‘True’, ‘TRUE’, ‘true’, ‘1’, ‘ON’, ‘On’, ‘on’, ‘YES’,
‘Yes’, ‘yes’, ‘y’, ‘t’, False, ‘False’, ‘FALSE’, ‘false’, ‘0’,
‘OFF’, ‘Off’, ‘off’, ‘NO’, ‘No’, ‘no’, ‘n’, ‘f’]
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/vol-transfer-v3.inc 0000664 0000000 0000000 00000012426 15131732575 0025213 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume transfers (volume-transfers) (3.55 or later)
===================================================
Transfers a volume from one user to another user.
This is the new transfer APIs with microversion 3.55.
Accept a volume transfer
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volume-transfers/{transfer_id}/accept
Accepts a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 413
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer_id: transfer_id
- auth_key: auth_key
Request Example
---------------
.. literalinclude:: ./samples/volume_transfers/volume-transfers-accept-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- transfer: transfer
- volume_id: volume_id
- id: id
- links: links
- name: transfer_name
Response Example
----------------
.. literalinclude:: ./samples/volume_transfers/volume-transfers-accept-response.json
:language: javascript
Create a volume transfer
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volume-transfers
Creates a volume transfer.
**Preconditions**
* The volume ``status`` must be ``available``
* Transferring encrypted volumes is not supported
* If the volume has snapshots, those snapshots must be ``available`` unless
``no_snapshots=True``
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer: transfer
- name: name
- volume_id: volume_id
- no_snapshots: no_snapshots
Request Example
---------------
.. literalinclude:: ./samples/volume_transfers/v3.57/volume-transfers-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- auth_key: auth_key
- links: links
- created_at: created_at
- volume_id: volume_id
- id: id
- name: name
- destination_project_id: destination_project_id
- source_project_id: source_project_id
- accepted: accepted
- no_snapshots: no_snapshots
Response Example
----------------
.. literalinclude:: ./samples/volume_transfers/v3.57/volume-transfers-create-response.json
:language: javascript
List volume transfers for a project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volume-transfers
Lists volume transfers.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort_transfer
- sort_key: sort_key_transfer
- sort_dir: sort_dir_transfer
- limit: limit_transfer
- offset: offset_transfer
- marker: marker_transfer
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id
- id: id
- links: links
- name: name
Response Example
----------------
.. literalinclude:: ./samples/volume_transfer/volume-transfers-list-response.json
:language: javascript
Show volume transfer detail
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volume-transfers/{transfer_id}
Shows details for a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer_id: transfer_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- created_at: created_at
- volume_id: volume_id
- id: id
- links: links
- name: name
- destination_project_id: destination_project_id
- source_project_id: source_project_id
- accepted: accepted
- no_snapshots: no_snapshots
Response Example
----------------
.. literalinclude:: ./samples/volume_transfers/v3.57/volume-transfers-show-response.json
:language: javascript
Delete a volume transfer
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/volume-transfers/{transfer_id}
Deletes a volume transfer.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- transfer_id: transfer_id
List volume transfers and details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volume-transfers/detail
Lists volume transfers, with details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- transfers: transfers
- created_at: created_at
- volume_id: volume_id
- id: id
- links: links
- name: name
- destination_project_id: destination_project_id
- source_project_id: source_project_id
- accepted: accepted
Response Example
----------------
.. literalinclude:: ./samples/volume_transfer/volume-transfers-list-detailed-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volume-manage.inc 0000664 0000000 0000000 00000010664 15131732575 0025002 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume manage extension (manageable_volumes)
============================================
Creates or lists volumes by using existing storage instead of allocating new
storage.
Manage an existing volume
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/manageable_volumes
Creates a Block Storage volume by using existing storage rather than allocating
new storage.
The caller must specify a reference to an existing storage volume
in the ref parameter in the request. Although each storage driver
might interpret this reference differently, the driver should
accept a reference structure that contains either a source-id
or source-name element, if possible.
The API chooses the size of the volume by rounding up the size of
the existing storage volume to the next gibibyte (GiB).
You cannot manage a volume to an encrypted volume type.
Prior to microversion 3.16 host field was required, with the possibility of
defining the cluster it is no longer required, but we must have either a host
or a cluster field but we cannot have them both with values.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume: volume
- description: description_vol
- availability_zone: availability_zone
- bootable: bootable
- volume_type: name_volume_type_optional
- name: volume_name_optional
- host: host_mutex
- cluster: cluster_mutex
- ref: ref
- metadata: metadata_vol
Request Example
---------------
.. literalinclude:: ./samples/volume_manage_extensions/volume-manage-request.json
:language: javascript
.. literalinclude:: ./samples/volume-manage-request-cluster.json
:language: javascript
Response
--------
.. rest_parameters:: parameters.yaml
- volume: volume
- status: status_vol
- migration_status: migration_status
- user_id: user_id
- attachments: attachments
- links: links_vol
- availability_zone: availability_zone
- bootable: bootable_response
- encrypted: encrypted
- created_at: created_at
- description: description_vol
- updated_at: updated_at
- volume_type: volume_type
- name: name_vol
- replication_status: replication_status
- consistencygroup_id: consistencygroup_id_required
- source_volid: source_volid
- snapshot_id: snapshot_id
- multiattach: multiattach_resp
- metadata: metadata_vol_obj
- id: id_vol
- size: size
Response Example
----------------
.. literalinclude:: ./samples/volume_manage_extensions/volume-manage-response.json
:language: javascript
List summary of volumes available to manage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/manageable_volumes
Search a volume backend and list summary of volumes which are available to
manage.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- offset: offset
- limit: limit
- marker: marker
- host: hostname
Response
--------
.. rest_parameters:: parameters.yaml
- manageable-volumes: manageable-volumes
- safe_to_manage: safe_to_manage
- reference: reference
- source-name: source-name
- size: size
Response Example
----------------
.. literalinclude:: ./samples/volume-manage-list-response.json
:language: javascript
List detail of volumes available to manage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/manageable_volumes/detail
Search a volume backend and list detail of volumes which are available to
manage.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- offset: offset
- limit: limit
- marker: marker
- host: host_query
Response
--------
.. rest_parameters:: parameters.yaml
- manageable-volumes: manageable-volumes
- cinder_id: cinder_id
- safe_to_manage: safe_to_manage
- reason_not_safe: reason_not_safe
- reference: reference
- source-name: source-name
- size: size
- extra_info: extra_info
Response Example
----------------
.. literalinclude:: ./samples/volume-manage-list-detail-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volume-type-access.inc 0000664 0000000 0000000 00000005014 15131732575 0025763 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume type access (types, action) (types, os-volume-type-access)
=================================================================
Private volume type access to project.
By default, volumes types are public. To create a private volume
type, set the ``is_public`` boolean field to ``false`` at volume
type creation time. To control access to a private volume type,
user needs to add a project to or remove a project from the volume
type. Private volume types without projects are only accessible by
users with the administrative role and context.
Add private volume type access to project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/types/{volume_type}/action
Adds private volume type access to a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type: volume_type_id
- addProjectAccess: add_project_access
- project: project
Request Example
---------------
.. literalinclude:: ./samples/volume_type/volume-type-access-add-request.json
:language: javascript
Remove private volume type access from project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/types/{volume_type}/action
Removes private volume type access from a project.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type: volume_type_id
- removeProjectAccess: remove_project_access
- project: project
Request Example
---------------
.. literalinclude:: ./samples/volume-type-access-delete-request.json
:language: javascript
List private volume type access detail
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
GET /v3/{project_id}/types/{volume_type}/os-volume-type-access
Lists project IDs that have access to private volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type: volume_type_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type_access: volume_type_access
- project_id: project_id
- volume_type_id: volume_type_id_body
Response Example
----------------
.. literalinclude:: ./samples/volume_type/volume-type-access-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volumes-v3-extensions.inc 0000664 0000000 0000000 00000001377 15131732575 0026463 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
API extensions (extensions)
===========================
List Known API extensions
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/extensions
Lists Block Storage API extensions.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 300
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- updated: updated_at
- description: description_extn
- links: links
- alias: alias
- name: name
Response Example
----------------
.. literalinclude:: ./samples/extensions/extensions-list-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volumes-v3-snapshots-actions.inc 0000664 0000000 0000000 00000004072 15131732575 0027737 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Snapshot actions (snapshots, action)
====================================
Administrator only, depending on policy settings.
Resets or updates the status of a snapshot.
Reset a snapshot's status
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/action
Resets the status. Specify the ``os-reset_status`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- os-reset_status: os-reset_status
- status: status_snap
Request Example
---------------
.. literalinclude:: ./samples/snapshot-status-reset-request.json
:language: javascript
Update status of a snapshot
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/action
Update fields related to the status of a snapshot.
Specify the ``os-update_snapshot_status`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- os-update_snapshot_status: os-update_snapshot_status
- status: status_snap
- progress: snapshot_progress
Request Example
---------------
.. literalinclude:: ./samples/snapshot-status-update-request.json
:language: javascript
Force delete a snapshot
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/action
Attempts to force delete a snapshot, regardless of state. Specify the
``os-force_delete`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- os-force_delete: os-force_delete
Request Example
---------------
.. literalinclude:: ./samples/snapshot-force-delete-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volumes-v3-snapshots.inc 0000664 0000000 0000000 00000031312 15131732575 0026276 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume snapshots (snapshots)
============================
A snapshot is a point-in-time copy of the data that a volume
contains.
When you create, list, or delete snapshots, these status values are
possible:
**Snapshot statuses**
+----------------+---------------------------------------------+
| Status | Description |
+----------------+---------------------------------------------+
| creating | The snapshot is being created. |
+----------------+---------------------------------------------+
| available | The snapshot is ready to use. |
+----------------+---------------------------------------------+
| backing-up | The snapshot is being backed up. |
+----------------+---------------------------------------------+
| deleting | The snapshot is being deleted. |
+----------------+---------------------------------------------+
| error | A snapshot creation error occurred. |
+----------------+---------------------------------------------+
| deleted | The snapshot has been deleted. |
+----------------+---------------------------------------------+
| unmanaging | The snapshot is being unmanaged. |
+----------------+---------------------------------------------+
| restoring | The snapshot is being restored to a volume. |
+----------------+---------------------------------------------+
| error_deleting | A snapshot deletion error occurred. |
+----------------+---------------------------------------------+
List snapshots and details
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/snapshots/detail
Lists all Block Storage snapshots, with details, that the project can access.
Since v3.31, if non-admin users specify invalid filters in the URL, the API
will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
- with_count: with_count
- consumes_quota: filter_consumes_quota
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_snap
- os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress
- description: description_snap_req
- created_at: created_at
- name: name
- user_id: user_id_min
- volume_id: volume_id_snap
- os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id
- size: size
- id: id_snap
- metadata: metadata
- count: count
- updated_at: updated_at
- snapshots_links: links_snap
- group_snapshot_id: group_snapshot_id_3_14
- consumes_quota: consumes_quota
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/snapshots/v3.65/snapshots-list-detailed-response.json
:language: javascript
Create a snapshot
~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/snapshots
Creates a volume snapshot, which is a point-in-time, complete copy of a volume.
You can create a volume from a snapshot.
Prior to API version 3.66, a 'force' flag was required to create a snapshot of
an in-use volume, but this is no longer the case. From API version 3.66, the
'force' flag is invalid when passed in a volume snapshot request. (For
backward compatibility, however, a 'force' flag with a value evaluating to
True is silently ignored.)
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot: snapshot_obj
- volume_id: volume_id
- name: name_snap_req
- description: description_snap
- force: force_snapshot
- metadata: metadata_snap
Request Example
---------------
.. literalinclude:: ./samples/snapshots/snapshot-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_snap
- description: description_snap_req
- created_at: created_at
- name: name_snap_req
- snapshot: snapshot_obj
- user_id: user_id_min
- volume_id: volume_id_snap
- metadata: metadata
- id: id_snap
- size: size
- updated_at: updated_at
- group_snapshot_id: group_snapshot_id_3_14
- consumes_quota: consumes_quota
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/snapshots/v3.65/snapshot-create-response.json
:language: javascript
List accessible snapshots
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/snapshots
Lists all Block Storage snapshots, with summary information, that the
project can access. Since v3.31, if non-admin users specify invalid filters
in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
- consumes_quota: filter_consumes_quota
- with_count: with_count
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_snap
- description: description_snap_req
- created_at: created_at
- name: name
- volume_id: volume_id_snap
- metadata: metadata
- id: id_snap
- size: size
- count: count
- updated_at: updated_at
- snapshots_links: links_snap
Response Example
----------------
.. literalinclude:: ./samples/snapshots/snapshots-list-response.json
:language: javascript
Show a snapshot's metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/snapshots/{snapshot_id}/metadata
Shows metadata for a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata
Response Example
----------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-show-response.json
:language: javascript
Create a snapshot's metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/metadata
Updates metadata for a snapshot.
Creates or replaces metadata items that match keys. Does not modify items that
are not in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- metadata: metadata
Request Example
---------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata
Response Example
----------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-create-response.json
:language: javascript
Update a snapshot's metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id}/metadata
Replaces all the snapshot's metadata with the key-value pairs in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- metadata: metadata
Request Example
---------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata
Response Example
----------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-update-response.json
:language: javascript
Show a snapshot's details
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/snapshots/{snapshot_id}
Shows details for a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_snap
- os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress
- description: description_snap_req
- created_at: created_at
- name: name
- snapshot: snapshot_obj
- user_id: user_id_min
- volume_id: volume_id_snap
- os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id
- size: size
- id: id_snap
- metadata: metadata
- updated_at: updated_at
- group_snapshot_id: group_snapshot_id_3_14
- consumes_quota: consumes_quota
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/snapshots/v3.65/snapshot-show-response.json
:language: javascript
Update a snapshot
~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id}
Updates a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- snapshot: snapshot_obj
- description: description_snap
- name: snapshot_name
Request Example
---------------
.. literalinclude:: ./samples/snapshots/snapshot-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: status_snap
- description: description_snap_req
- created_at: created_at
- name: name
- snapshot: snapshot_obj
- id: id_snap
- size: size
- volume_id: volume_id_snap
- user_id: user_id_min
- metadata: metadata
- group_snapshot_id: group_snapshot_id_3_14
- consumes_quota: consumes_quota
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/snapshots/v3.65/snapshot-update-response.json
:language: javascript
Delete a snapshot
~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/snapshots/{snapshot_id}
Deletes a snapshot.
Preconditions:
- Snapshot status must be ``available`` or ``error``
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
Show a snapshot's metadata for a specific key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/snapshots/{snapshot_id}/metadata/{key}
Shows metadata for a snapshot for a specific key.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- key: key_view
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- meta: meta_snap
Response Example
----------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-show-key-response.json
:language: javascript
Delete a snapshot's metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
DELETE /v3/{project_id}/snapshots/{snapshot_id}/metadata/{key}
Deletes metadata for a snapshot.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- key: key_path
Update a snapshot's metadata for a specific key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id}/metadata/{key}
Updates metadata for a snapshot for a specific key.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- snapshot_id: snapshot_id_path
- key: key_update
- meta: meta_snap
Request Example
---------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-update-key-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- meta: meta_snap
Response Example
----------------
.. literalinclude:: ./samples/snapshots/snapshot-metadata-update-key-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volumes-v3-types.inc 0000664 0000000 0000000 00000037135 15131732575 0025431 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume types (types)
====================
To create an environment with multiple-storage back ends, you must
specify a volume type. The API spawns Block Storage volume back
ends as children to ``cinder-volume``, and keys them from a unique
queue. The API names the back ends ``cinder-volume.HOST.BACKEND``.
For example, ``cinder-volume.ubuntu.lvmdriver``. When you create a
volume, the scheduler chooses an appropriate back end for the
volume type to handle the request.
For information about how to use volume types to create multiple-
storage back ends, see `Configure multiple-storage back ends
<https://docs.openstack.org/cinder/latest/admin/blockstorage-multi-backend.html>`_.
Update a volume type
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/types/{volume_type_id}
Updates a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- volume_type: volume_type
- name: name_volume_type_optional
- description: description_volume_type_optional
- is_public: is_public_volume_type_optional
Request Example
---------------
.. literalinclude:: ./samples/volume_type/volume-type-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type: volume_type
- is_public: is_public_volume_type_required
- extra_specs: extra_specs_volume_type_optional
- description: description_volume_type_required
- name: name_volume_type_required
- id: volume_type_id
Response Example
----------------
.. literalinclude:: ./samples/volume_type/volume-type-update-response.json
:language: javascript
Create or update extra specs for volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/types/{volume_type_id}/extra_specs
Adds new extra specifications to a volume type, or updates the extra
specifications that are assigned to a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- extra_specs: extra_specs_volume_type_required
Request Example
---------------
.. literalinclude::
./samples/volume_type/volume-type-extra-specs-create-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- extra_specs: extra_specs_volume_type_required
Response Example
----------------
.. literalinclude::
./samples/volume_type/volume-type-extra-specs-create-update-response.json
:language: javascript
Show all extra specifications for volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/extra_specs
Shows all extra specifications assigned to a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- extra_specs: extra_specs_volume_type_required
Response Example
----------------
.. literalinclude::
./samples/volume_type/volume-type-all-extra-specs-show-response.json
:language: javascript
Show extra specification for volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/extra_specs/{key}
Shows the specific extra specification assigned to a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- key: key_extra_spec
Response Example
----------------
.. literalinclude::
./samples/volume_type/volume-type-specific-extra-specs-show-response.json
:language: javascript
Update extra specification for volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/types/{volume_type_id}/extra_specs/{key}
Update the specific extra specification assigned to a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- key: key_extra_spec
Request Example
---------------
.. literalinclude::
./samples/volume_type/volume-type-specific-extra-specs-update-request.json
:language: javascript
Response Example
----------------
.. literalinclude::
./samples/volume_type/volume-type-specific-extra-specs-update-response.json
:language: javascript
Delete extra specification for volume type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
DELETE /v3/{project_id}/types/{volume_type_id}/extra_specs/{key}
Deletes the specific extra specification assigned to a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- key: key_extra_spec
Show volume type detail
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/types/{volume_type_id}
Shows details for a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type: volume_type
- is_public: is_public_volume_type_required
- extra_specs: extra_specs_volume_type_optional
- description: description_volume_type_required
- name: name_volume_type_required
- id: volume_type_id_body
- os-volume-type-access:is_public: is_public_volume_type_required
- qos_specs_id: qos_specs_id
Response Example
----------------
.. literalinclude:: ./samples/volume_type/volume-type-show-response.json
:language: javascript
Show default volume type
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/types/default
Shows details for the default volume type, that is, the volume type that
will be used in the `Create a volume`_ request if you do not specify one.
This could be one of the following:
- Your project's default volume type *(since microversion 3.62)*
- The installation's default volume type as configured by the operator
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 404
- 500
Error conditions
----------------
It is only possible to receive a 404 (Not Found) response in pre-Train
versions of the Block Storage service, as a configured default volume
type has been required since the Train release.
If you receive a 500 (Internal Error Response), then the default volume
type has not been configured correctly by the operator. Please contact
your cloud provider.
* When the default volume type is misconfigured, requests to
`Create a volume`_ that do not include a volume type will
fail.
* The workaround is to include a volume type in your request. You
can `List all volume types`_ to determine a volume type to use.
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type: volume_type
- is_public: is_public_volume_type_required
- extra_specs: extra_specs_volume_type_optional
- description: description_volume_type_required
- name: name_volume_type_required
- qos_specs_id: qos_specs_id
Response Example
----------------
.. literalinclude:: ./samples/volume_type/volume-type-default-response.json
:language: javascript
Delete a volume type
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/types/{volume_type_id}
Deletes a volume type.
*Note to operators:* Since the Train release, untyped volumes are
not allowed, and a configured default volume type is required in each
deployment. An attempt to delete the configured default volume type
will fail.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
List all volume types
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/types
Lists volume types.
To determine which of these is the default type that will be used if you
do not specify one in the `Create a volume`_ request, use the
`Show default volume type`_ request.
*Note to users:* There may be a volume type named ``__DEFAULT__`` in the
list. Try not to use this volume type, unless necessary or instructed by the
operator, in a `Create a volume`_ request. If you wish to create a volume of
*your* default volume type, simply omit the ``volume_type`` parameter in your
`Create a volume`_ request.
*Note to operators:* The ``__DEFAULT__`` volume type was introduced in
the Train release as a placeholder to prevent the creation of untyped
volumes. Under the proper conditions, it may be removed from your
deployment. Consult the Default Volume Types section in
`Cinder Administration Guide
<https://docs.openstack.org/cinder/latest/admin/default-volume-types.html>`_
for details.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- is_public: is_public_volume_type_query
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_types: volume_types
- extra_specs: extra_specs_volume_type_optional
- name: name_volume_type_required
- is_public: is_public_volume_type_required
- description: description_volume_type_required
- id: volume_type_id_body
- os-volume-type-access:is_public: is_public_volume_type_required
- qos_specs_id: qos_specs_id
Response Example
----------------
.. literalinclude:: ./samples/volume_type/volume-types-list-response.json
:language: javascript
Create a volume type
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/types
Creates a volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type: volume_type
- name: name_volume_type_required
- os-volume-type-access:is_public: is_public_volume_type_optional
- description: description_volume_type_optional
- extra_specs: extra_specs_volume_type_optional
Request Example
---------------
.. literalinclude:: ./samples/volume_type/volume-type-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type: volume_type
- is_public: is_public_volume_type_required
- extra_specs: extra_specs_volume_type_optional
- description: description_volume_type_required
- name: name_volume_type_required
- id: volume_type_id_body
- os-volume-type-access:is_public: is_public_volume_type_required
Response Example
----------------
.. literalinclude:: ./samples/volume_type/volume-type-create-response.json
:language: javascript
Show an encryption type
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/encryption
To show an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume_type_id: volume_type_id_body
- encryption_id: encryption_id_body
- key_size: key_size
- provider: provider
- control_location: control_location
- cipher: cipher
- deleted: deleted
- created_at: created_at
- updated_at: updated_at
- deleted_at: deleted_at
Response Example
----------------
.. literalinclude:: ./samples/volume_type/encryption-type-show-response.json
:language: javascript
Show encryption specs item
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/encryption/{key}
To show encryption specs item for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- key: key_encrypt_spec
Response Example
----------------
.. literalinclude::
./samples/volume_type/encryption-type-specific-specs-show-response.json
:language: javascript
Delete an encryption type
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
DELETE /v3/{project_id}/types/{volume_type_id}/encryption/{encryption_id}
To delete an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- encryption_id: encryption_id
Create an encryption type
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/types/{volume_type_id}/encryption
To create an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- encryption: encryption
- key_size: key_size
- provider: provider_req
- control_location: control_location
- cipher: cipher
Request Example
---------------
.. literalinclude:: ./samples/volume_type/encryption-type-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- encryption: encryption
- volume_type_id: volume_type_id_body
- encryption_id: encryption_id_body
- key_size: key_size
- provider: provider
- control_location: control_location
- cipher: cipher
Response Example
----------------
.. literalinclude:: ./samples/volume_type/encryption-type-create-response.json
:language: javascript
Update an encryption type
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method::
PUT /v3/{project_id}/types/{volume_type_id}/encryption/{encryption_id}
To update an encryption type for an existing volume type.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_type_id: volume_type_id
- encryption_id: encryption_id
- encryption: encryption
- key_size: key_size
- provider: provider_req_optional
- control_location: control_location
- cipher: cipher
Request Example
---------------
.. literalinclude:: ./samples/volume_type/encryption-type-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- encryption: encryption
- key_size: key_size
- provider: provider_optional
- control_location: control_location
- cipher: cipher
Response Example
----------------
.. literalinclude:: ./samples/volume_type/encryption-type-update-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volumes-v3-versions.inc 0000664 0000000 0000000 00000001321 15131732575 0026121 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
API version details
===================
Show API v3 details
~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/
Shows details for Block Storage API v3.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 403
Request
-------
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- status: version_status
- updated: version_updated
- links: links_res
- min_version: version_min
- version: version_max
- media-types: media_types
- id: version_id
Response Example
----------------
.. literalinclude:: ./samples/versions/version-show-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volumes-v3-volumes-actions.inc 0000664 0000000 0000000 00000062140 15131732575 0027407 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volume actions (volumes, action)
================================
Extends the size of, resets statuses for, sets image metadata for,
and removes image metadata from a volume. Attaches a volume to a
server, detaches a volume from a server, and removes a volume from
Block Storage management without actually removing the back-end
storage object associated with it.
Extend a volume size
~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Extends the size of a volume to a requested size, in gibibytes (GiB).
Specify the ``os-extend`` action in the request body.
Preconditions
- Prior to microversion ``3.42`` the volume status must be ``available``.
Starting with microversion ``3.42``, attached volumes with status ``in-use``
may be able to be extended depending on policy and backend volume and
compute driver constraints in the cloud. Note that ``reserved`` is not a
valid state for extend.
- Sufficient amount of storage must exist to extend the volume.
- The user quota must have sufficient volume storage.
Postconditions
- If the request is processed successfully, the volume status will change to
``extending`` while the volume size is being extended.
- Upon successful completion of the extend operation, the volume status will
go back to its original value.
- Starting with microversion ``3.42``, when extending the size of an attached
volume, the Block Storage service will notify the Compute service that an
attached volume has been extended. The Compute service will asynchronously
process the volume size change for the related server instance. This can be
monitored using the ``GET /servers/{server_id}/os-instance-actions`` API in
the Compute service.
Troubleshooting
- An ``error_extending`` volume status indicates that the request
failed. Ensure that you meet the preconditions and retry the
request. If the request fails again, investigate the storage back
end.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-extend: os-extend
- new_size: new_size
Request Example
---------------
.. literalinclude:: ./samples/volume-extend-request.json
:language: javascript
Complete extending a volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Specify the ``os-extend_volume_completion`` action in the request body.
Complete extending an attached volume that has been left in status
``extending`` after notifying the compute agent.
Depending on the value of the ``error`` parameter, the extend operation
will be either rolled back or finalized.
**Preconditions**
* The volume must have the status ``extending``.
* The volume's admin metadata must contain a set of keys indicating that
Cinder was waiting for external feedback on the success of the operation.
**Asynchronous Postconditions**
If the ``error`` parameter is ``false`` or missing, and the extend operation
was successfully finalized, the volume status will be ``in-use``.
Otherwise, the volume status will be ``error_extending``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id_path
- project_id: project_id_path
- os-extend_volume_completion: os-extend_volume_completion
- error: extend_completion_error
Request Example
---------------
.. literalinclude:: ./samples/volume-os-extend_volume_completion-request.json
:language: javascript
Reset a volume's statuses
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Administrator only. Resets the status, attach status, revert to snapshot,
and migration status for a volume. Specify the ``os-reset_status`` action in
the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-reset_status: os-reset_status
- status: status_vol
- migration_status: migration_status
- attach_status: attach_status
Request Example
---------------
.. literalinclude:: ./samples/volume-status-reset-request.json
:language: javascript
Revert volume to snapshot
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Reverts a volume to its latest snapshot. This API only supports reverting a
detached volume, and the volume status must be ``available``.
Available since API microversion ``3.40``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 400
- 403
- 404
- 409
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- revert: revert
- snapshot_id: snapshot_id_revert
Request Example
---------------
.. literalinclude:: ./samples/volume-revert-to-snapshot-request.json
:language: javascript
Set image metadata for a volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Sets the image metadata for a volume. Specify the ``os-set_image_metadata``
action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-set_image_metadata: os-set_image_metadata
- metadata: metadata_image
Request Example
---------------
.. literalinclude:: ./samples/volume-image-metadata-set-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_image
Response Example
----------------
.. literalinclude:: ./samples/volume-image-metadata-update-response.json
:language: javascript
Remove image metadata from a volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Removes image metadata, by key, from a volume. Specify the
``os-unset_image_metadata`` action in the request body and the ``key`` for the
metadata key and value pair that you want to remove.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-unset_image_metadata: os-unset_image_metadata
- key: key
Request Example
---------------
.. literalinclude:: ./samples/volume-image-metadata-unset-request.json
:language: javascript
Show image metadata for a volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Shows image metadata for a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-show_image_metadata: os-show_image_metadata
Request Example
---------------
.. literalinclude:: ./samples/image-metadata-show-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_image
Response Example
----------------
.. literalinclude:: ./samples/image-metadata-show-response.json
:language: javascript
Attach volume to a server
~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Attaches a volume to a server. Specify the ``os-attach`` action in the request
body.
Preconditions
- Volume status must be ``available``.
- You should set ``instance_uuid`` or ``host_name``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-attach: os-attach
- instance_uuid: instance_uuid
- mountpoint: mountpoint
- host_name: host_name
Request Example
---------------
.. literalinclude:: ./samples/volume-attach-request.json
:language: javascript
Detach volume from server
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Detaches a volume from a server. Specify the ``os-detach`` action in the
request body.
Preconditions
- Volume status must be ``in-use``.
For security reasons (see bug `#2004555
<https://bugs.launchpad.net/nova/+bug/2004555>`_), regardless of the policy
defaults, the Block Storage API rejects REST API calls manually made from
users with a 409 status code if completing the request could pose a risk, which
happens if all of these happen:
- The request comes from a user
- There's an instance uuid in the provided attachment or in the volume's attachment
- VM exists in Nova
- Instance has the volume attached
- Attached volume in instance is using the attachment
Calls coming from other OpenStack services (like the Compute Service) are
always accepted.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 409
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-detach: os-detach
- attachment_id: attachment_id
Request Example
---------------
.. literalinclude:: ./samples/volume-detach-request.json
:language: javascript
Unmanage a volume
~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Removes a volume from Block Storage management without removing the back-end
storage object that is associated with it. Specify the ``os-unmanage`` action
in the request body.
Preconditions
- Volume status must be ``available``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-unmanage: os-unmanage
Request Example
---------------
.. literalinclude:: ./samples/volume-unmanage-request.json
:language: javascript
Force detach a volume
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Forces a volume to detach. Specify the ``os-force_detach`` action in the
request body.
Rolls back an unsuccessful detach operation after you disconnect
the volume.
Policy defaults enable only users with the administrative role to
perform this operation. Cloud providers can change these permissions
through the ``volume_extension:volume_admin_actions:force_detach`` rule in
the policy configuration file.
For security reasons (see bug `#2004555
<https://bugs.launchpad.net/nova/+bug/2004555>`_), regardless of the policy
defaults, the Block Storage API rejects REST API calls manually made from
users with a 409 status code if completing the request could pose a risk, which
happens if all of these happen:
- The request comes from a user
- There's an instance uuid in the provided attachment or in the volume's attachment
- VM exists in Nova
- Instance has the volume attached
- Attached volume in instance is using the attachment
Calls coming from other OpenStack services (like the Compute Service) are
always accepted.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 409
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-force_detach: os-force_detach
- attachment_id: attachment_id
- connector: connector
Request Example
---------------
.. literalinclude:: ./samples/volume-force-detach-request.json
:language: javascript
Retype a volume
~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Change type of existing volume. Specify the ``os-retype`` action in the request
body.
Changes the volume type of an existing volume. Cinder may migrate the volume to
a proper volume host according to the new volume type.
Retyping an *in-use* volume from a multiattach-capable type to a
non-multiattach-capable type, or vice-versa, is not supported. It is generally
not recommended to retype an *in-use* multiattach volume if that volume has
more than one active read/write attachment.
Policy defaults enable only users with the administrative role or the owner of
the volume to perform this operation. Cloud providers can change these
permissions through the policy configuration file.
Retyping an unencrypted volume to the same size encrypted volume will most
likely fail. Even though the volume is the same size as the source volume, the
encrypted volume needs to store additional encryption information overhead.
This results in the new volume not being large enough to hold all data.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-retype: os-retype
- new_type: new_type
- migration_policy: migration_policy
Request Example
---------------
.. literalinclude:: ./samples/volume-os-retype-request.json
:language: javascript
Migrate a volume
~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Specify the ``os-migrate_volume`` action in the request body.
Migrates a volume to the specified host. Starting with the
`3.16 microversion`_ a cluster can be specified instead of a host.
It is generally not recommended to migrate an *in-use* multiattach volume if
that volume has more than one active read/write attachment.
Policy defaults enable only users with the administrative role to perform this
operation. Cloud providers can change these permissions through the
policy configuration file.
.. _3.16 microversion: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id15
**Preconditions**
* The volume ``status`` must be ``available`` or ``in-use``.
* The volume ``migration_status`` must be ``None``, ``deleting``, ``error``,
or ``success``.
* The volume ``replication_status`` must be ``None``, ``disabled`` or
``not-capable``.
* The migration must happen to a host (or cluster) other than the one on
  which the volume currently resides.
* The volume must not be a member of a group.
* The volume must not have snapshots.
**Asynchronous Postconditions**
On success, the volume ``status`` will return to its original status of
``available`` or ``in-use`` and the ``migration_status`` will be ``success``.
On failure, the ``migration_status`` will be ``error``. In the case of failure,
if ``lock_volume`` was true and the volume was originally ``available`` when
it was migrated, the ``status`` will go back to ``available``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id_path
- project_id: project_id_path
- os-migrate_volume: os-migrate_volume
- host: migrate_host
- force_host_copy: migrate_force_host_copy
- lock_volume: migrate_lock_volume
- cluster: migrate_cluster
Request Example
---------------
.. literalinclude:: ./samples/volume-os-migrate_volume-request.json
:language: javascript
Complete migration of a volume
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Specify the ``os-migrate_volume_completion`` action in the request body.
Complete the migration of a volume, updating the new volume in the DB,
returning the ``status`` of the new volume to that of the original volume
and finally deleting the original volume.
**Preconditions**
* Both the original and new volume ``migration_status`` must be ``None`` or
both must be set to a non ``None`` value.
* Additionally when set the new volume ``migration_status`` must take the
form of ``target:VOLUME_UUID`` where VOLUME_UUID is the original volume UUID.
**Asynchronous Postconditions**
On success, the volume ``status`` will return to its original status of
``available`` or ``in-use`` and the ``migration_status`` will be ``success``.
On failure, the ``migration_status`` will be ``error``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- volume_id: volume_id_path
- project_id: project_id_path
- os-migrate_volume_completion: os-migrate_volume_completion
- new_volume: new_volume
- error: migration_completion_error
Request Example
---------------
.. literalinclude:: ./samples/volume-os-migrate_volume_completion-request.json
:language: javascript
Force delete a volume
~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Attempts force-delete of volume, regardless of state. Specify the
``os-force_delete`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-force_delete: os-force_delete
Request Example
---------------
.. literalinclude:: ./samples/volume-force-delete-request.json
:language: javascript
Update a volume's bootable status
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Updates the bootable status for a volume, marking it as a bootable volume. Specify
the ``os-set_bootable`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-set_bootable: os-set_bootable
- bootable: bootable_required
Request Example
---------------
.. literalinclude:: ./samples/volume-bootable-status-update-request.json
:language: javascript
Upload volume to image
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Uploads the specified volume to the image service.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-volume_upload_image: os-volume_upload_image
- image_name: image_name
- force: force_upload_vol
- disk_format: disk_format_upload
- container_format: container_format_upload
- visibility: visibility_min
- protected: protected
Request Example
---------------
.. literalinclude:: ./samples/volume_actions/volume-upload-to-image-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- os-volume_upload_image: os-volume_upload_image
- status: status_vol
- image_name: image_name
- disk_format: disk_format
- container_format: container_format
- visibility: visibility_min
- protected: protected
- updated_at: updated_at
- image_id: image_id
- display_description: description_vol_req
- id: id_vol
- size: size
- volume_type: volume_type_vol
Response Example
----------------
.. literalinclude:: ./samples/volume_actions/volume-upload-to-image-response.json
:language: javascript
Reserve volume
~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Mark volume as reserved. Specify the ``os-reserve`` action in the
request body.
Preconditions
- Volume status must be ``available``.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-reserve: os-reserve
Request Example
---------------
.. literalinclude:: ./samples/volume-reserve-request.json
:language: javascript
Unmark volume as reserved.
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Unmark volume as reserved. Specify the ``os-unreserve`` action in
the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-unreserve: os-unreserve
Request Example
---------------
.. literalinclude:: ./samples/volume-unreserve-request.json
:language: javascript
Update volume status to detaching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Update volume status to 'detaching'. Specify the ``os-begin_detaching`` action
in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-begin_detaching: os-begin_detaching
Request Example
---------------
.. literalinclude:: ./samples/volume-begin-detaching-request.json
:language: javascript
Roll back volume status to in-use
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Roll back volume status to 'in-use'. Specify the ``os-roll_detaching`` action
in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-roll_detaching: os-roll_detaching
Request Example
---------------
.. literalinclude:: ./samples/volume-roll-detaching-request.json
:language: javascript
Terminate volume attachment
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Terminate volume attachment. Specify the ``os-terminate_connection``
action in the request body.
Preconditions
- Volume status must be ``in-use``.
For security reasons (see bug `#2004555
<https://bugs.launchpad.net/nova/+bug/2004555>`_), regardless of the policy
defaults, the Block Storage API rejects REST API calls manually made from
users with a 409 status code if completing the request could pose a risk, which
happens if all of these happen:
- The request comes from a user
- There's an instance uuid in the volume's attachment
- VM exists in Nova
- Instance has the volume attached
- Attached volume in instance is using the attachment
Calls coming from other OpenStack services (like the Compute Service) are
always accepted.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
.. rest_status_code:: error ../status.yaml
- 409
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-terminate_connection: os-terminate_connection
- connector: connector_required
Request Example
---------------
.. literalinclude:: ./samples/volume-terminate-connection-request.json
:language: javascript
Initialize volume attachment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Initialize volume attachment. Specify the ``os-initialize_connection``
action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-initialize_connection: os-initialize_connection
- connector: connector_required
Request Example
---------------
.. literalinclude:: ./samples/volume-initialize-connection-request.json
:language: javascript
Updates volume read-only access-mode flag
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Enables or disables update of volume to read-only access mode.
Specify the ``os-update_readonly_flag`` action in the request body.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- os-update_readonly_flag: os-update_readonly_flag
- readonly: readonly
Request Example
---------------
.. literalinclude:: ./samples/volume-readonly-update-request.json
:language: javascript
Reimage a volume
~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
Re-image a volume with a specific image. Specify the ``os-reimage`` action
in the request body.
A volume in ``available`` or ``error`` status can be re-imaged directly. To
re-image a volume in ``reserved`` status, you must include the
``reimage_reserved`` parameter set to ``true``.
.. note:: Image signature verification is currently unsupported when
re-imaging a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- image_id: image_id
- reimage_reserved: reimage_reserved
- os-reimage: os-reimage
Request Example
---------------
.. literalinclude:: ./samples/volume-os-reimage-request.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/volumes-v3-volumes.inc 0000664 0000000 0000000 00000050721 15131732575 0025753 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Volumes (volumes)
=================
A volume is a detachable block storage device similar to a USB hard
drive. You can attach a volume to an instance, and if the volume is
of an appropriate volume type, a volume can be attached to multiple
instances.
The ``snapshot_id`` and ``source_volid`` parameters specify the ID
of the snapshot or volume from which this volume originates. If the
volume was not created from a snapshot or source volume, these
values are null.
When you create, list, update, or delete volumes, the possible
status values are:
**Volume statuses**
+------------------+--------------------------------------------------------+
| Status | Description |
+------------------+--------------------------------------------------------+
| creating | The volume is being created. |
+------------------+--------------------------------------------------------+
| available | The volume is ready to attach to an instance. |
+------------------+--------------------------------------------------------+
| reserved | The volume is reserved for attaching or shelved. |
+------------------+--------------------------------------------------------+
| attaching | The volume is attaching to an instance. |
+------------------+--------------------------------------------------------+
| detaching | The volume is detaching from an instance. |
+------------------+--------------------------------------------------------+
| in-use | The volume is attached to an instance. |
+------------------+--------------------------------------------------------+
| maintenance | The volume is locked and being migrated. |
+------------------+--------------------------------------------------------+
| deleting | The volume is being deleted. |
+------------------+--------------------------------------------------------+
| awaiting-transfer| The volume is awaiting transfer.                       |
+------------------+--------------------------------------------------------+
| error | A volume creation error occurred. |
+------------------+--------------------------------------------------------+
| error_deleting | A volume deletion error occurred. |
+------------------+--------------------------------------------------------+
| backing-up | The volume is being backed up. |
+------------------+--------------------------------------------------------+
| restoring-backup | A backup is being restored to the volume. |
+------------------+--------------------------------------------------------+
| error_backing-up | A backup error occurred. |
+------------------+--------------------------------------------------------+
| error_restoring | A backup restoration error occurred. |
+------------------+--------------------------------------------------------+
| error_extending | An error occurred while attempting to extend a volume. |
+------------------+--------------------------------------------------------+
| downloading | The volume is downloading an image. |
+------------------+--------------------------------------------------------+
| uploading | The volume is being uploaded to an image. |
+------------------+--------------------------------------------------------+
| retyping | The volume is changing type to another volume type. |
+------------------+--------------------------------------------------------+
| extending | The volume is being extended. |
+------------------+--------------------------------------------------------+
List accessible volumes with details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volumes/detail
Lists all Block Storage volumes, with details, that the project can access,
since v3.31 if non-admin users specify invalid filters in the url, API will
return bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
- metadata: metadata_query
- with_count: with_count
- created_at: filter_created_at
- updated_at: filter_updated_at
- consumes_quota: filter_consumes_quota
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links_vol
- availability_zone: availability_zone
- os-vol-host-attr:host: os-vol-host-attr:host
- encrypted: encrypted
- encryption_key_id: encryption_key_id
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id_vol
- size: size
- user_id: user_id
- os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id
- os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat
- metadata: metadata_vol_obj
- status: status_vol
- volume_image_metadata: volume_image_metadata
- description: description_vol_req
- multiattach: multiattach_resp
- source_volid: source_volid
- consistencygroup_id: consistencygroup_id_required
- os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id
- name: name_vol
- bootable: bootable_response
- created_at: created_at
- volumes: volumes
- volume_type: volume_type_vol
- volume_type_id: volume_type_id_363
- group_id: group_id_optional
- volumes_links: links_vol_optional
- provider_id: provider_id
- service_uuid: service_uuid
- shared_targets: shared_targets
- shared_targets: shared_targets_tristate
- cluster_name: volume_cluster_name
- consumes_quota: consumes_quota
- count: count
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/volumes/v3.65/volumes-list-detailed-response.json
:language: javascript
Create a volume
~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes
Creates a volume.
To create a bootable volume, include the UUID of the image from
which you want to create the volume in the ``imageRef`` attribute
in the request body.
Since the Train release, every volume must have a volume type. It
is **optional** to specify a volume type as part of your `Create a
volume` request. If you do not specify one, a default volume type
will be supplied for you. This type may vary according to what
project you are in and how the operator has configured the Block
Storage service. Use the `Show default volume type`_ request to
determine your effective default volume type.
Preconditions
- You must have enough volume storage quota remaining to create a
volume of size requested.
Asynchronous Postconditions
- With correct permissions, you can see the volume status as
``available`` through API calls.
- With correct access, you can see the created volume in the storage
system that OpenStack Block Storage manages.
Troubleshooting
- If volume status remains ``creating`` or shows another error
status, the request failed. Ensure you meet the preconditions
then investigate the storage back end.
- Volume is not created in the storage system that OpenStack Block
Storage manages.
- The storage node needs enough free storage space to match the size
of the volume creation request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume: volume
- size: size
- availability_zone: availability_zone
- source_volid: source_volid
- description: description_vol
- snapshot_id: snapshot_id
- backup_id: backup_id
- name: volume_name_optional
- imageRef: imageRef
- volume_type: volume_type_detail
- metadata: metadata_vol
- consistencygroup_id: consistencygroup_id_required
- OS-SCH-HNT:scheduler_hints: OS-SCH-HNT:scheduler_hints
Request Example
---------------
.. literalinclude:: ./samples/volumes/volume-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links_vol
- availability_zone: availability_zone
- encrypted: encrypted
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id_vol
- size: size
- user_id: user_id
- metadata: metadata_vol_obj
- status: status_vol
- description: description_vol_req
- multiattach: multiattach_resp
- source_volid: source_volid
- volume: volume
- consistencygroup_id: consistencygroup_id_required
- name: name_vol
- bootable: bootable_response
- created_at: created_at
- volume_type: volume_type_vol
- volume_type_id: volume_type_id_363
- group_id: group_id_optional
- provider_id: provider_id
- service_uuid: service_uuid
- shared_targets: shared_targets
- shared_targets: shared_targets_tristate
- cluster_name: volume_cluster_name
- consumes_quota: consumes_quota
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/volumes/v3.65/volume-create-response.json
:language: javascript
List accessible volumes
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volumes
Lists summary information for all Block Storage volumes that the
project can access. Since v3.31, if non-admin users specify invalid
filters in the URL, the API will return a bad request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
.. rest_status_code:: error ../status.yaml
- 400
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
- sort: sort
- sort_key: sort_key
- sort_dir: sort_dir
- limit: limit
- offset: offset
- marker: marker
- metadata: metadata_query
- with_count: with_count
- created_at: filter_created_at
- consumes_quota: filter_consumes_quota
- updated_at: filter_updated_at
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volumes: volumes
- id: id_vol
- links: links_vol
- name: name_vol
- volumes_links: links_vol_optional
- count: count
Response Example
----------------
.. literalinclude:: ./samples/volumes/volumes-list-response.json
:language: javascript
Show a volume's details
~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volumes/{volume_id}
Shows details for a volume.
Preconditions
- The volume must exist.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links_vol
- availability_zone: availability_zone
- os-vol-host-attr:host: os-vol-host-attr:host
- encrypted: encrypted
- encryption_key_id: encryption_key_id
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id_vol
- size: size
- user_id: user_id
- os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id
- os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat
- metadata: metadata_vol_obj
- status: status_vol
- volume_image_metadata: volume_image_metadata
- description: description_vol_req
- multiattach: multiattach_resp
- source_volid: source_volid
- volume: volume
- consistencygroup_id: consistencygroup_id_required
- os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id
- name: name_vol
- bootable: bootable_response
- created_at: created_at
- volume_type: volume_type_vol
- volume_type_id: volume_type_id_363
- service_uuid: service_uuid
- shared_targets: shared_targets
- shared_targets: shared_targets_tristate
- cluster_name: volume_cluster_name
- provider_id: provider_id
- group_id: group_id_optional
- consumes_quota: consumes_quota
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/volumes/v3.65/volume-show-response.json
:language: javascript
Update a volume
~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/volumes/{volume_id}
Updates a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- volume: volume
- description: description_vol
- name: volume_name_optional
- metadata: metadata_vol_assoc
Request Example
---------------
.. literalinclude:: ./samples/volumes/volume-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- migration_status: migration_status
- attachments: attachments
- links: links_vol
- availability_zone: availability_zone
- encrypted: encrypted
- updated_at: updated_at
- replication_status: replication_status
- snapshot_id: snapshot_id
- id: id_vol
- size: size
- user_id: user_id
- metadata: metadata_vol_obj
- status: status_vol
- description: description_vol_req
- multiattach: multiattach_resp
- source_volid: source_volid
- volume: volume
- consistencygroup_id: consistencygroup_id_required
- name: name_vol
- bootable: bootable_response
- created_at: created_at
- volume_type: volume_type_vol
- volume_type_id: volume_type_id_363
- group_id: group_id_optional
- provider_id: provider_id
- service_uuid: service_uuid
- shared_targets: shared_targets
- shared_targets: shared_targets_tristate
- cluster_name: volume_cluster_name
- consumes_quota: consumes_quota
Response Example (v3.65)
------------------------
.. literalinclude:: ./samples/volumes/v3.65/volume-update-response.json
:language: javascript
Delete a volume
~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/volumes/{volume_id}
Deletes a volume.
Preconditions
- Volume status must be ``available``, ``in-use``, ``error``,
``error_restoring``, ``error_extending``, ``error_managing``,
and must not be ``migrating``, ``attached``, ``awaiting-transfer``,
belong to a group, have snapshots or be disassociated from
snapshots after volume transfer.
- The ``cascade`` option can be passed in the request if you want
all snapshots of this volume to be deleted automatically,
which should allow the volume deletion to succeed.
- You cannot delete a volume that is in a migration.
Asynchronous Postconditions
- The volume is deleted in volume index.
- The volume managed by OpenStack Block Storage is deleted in
storage node.
Troubleshooting
- If volume status remains in ``deleting`` or becomes
``error_deleting`` the request failed. Ensure you meet the
preconditions then investigate the storage back end.
- The volume managed by OpenStack Block Storage is not deleted from
the storage system.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- cascade: cascade
- force: force_vol_del
Create metadata for volume
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/metadata
Creates or replaces metadata for a volume. Does not modify items that are not
in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- metadata: metadata_vol_assoc_req
Request Example
---------------
.. literalinclude:: ./samples/volumes/volume-metadata-create-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_vol_assoc_req
Response Example
----------------
.. literalinclude:: ./samples/volumes/volume-metadata-create-response.json
:language: javascript
Show a volume's metadata
~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volumes/{volume_id}/metadata
Shows metadata for a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_vol_assoc_req
Response Example
----------------
.. literalinclude:: ./samples/volumes/volume-metadata-show-response.json
:language: javascript
Update a volume's metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/volumes/{volume_id}/metadata
Replaces all the volume's metadata with the key-value pairs in the request.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- metadata: metadata_vol_assoc_req
Request Example
---------------
.. literalinclude:: ./samples/volumes/volume-metadata-update-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- metadata: metadata_vol_assoc_req
Response Example
----------------
.. literalinclude:: ./samples/volumes/volume-metadata-update-response.json
:language: javascript
Show a volume's metadata for a specific key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volumes/{volume_id}/metadata/{key}
Shows metadata for a volume for a specific key.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- key: key_view
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- meta: meta
Response Example
----------------
.. literalinclude:: ./samples/volumes/volume-metadata-show-key-response.json
:language: javascript
Delete a volume's metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: DELETE /v3/{project_id}/volumes/{volume_id}/metadata/{key}
Deletes metadata for a volume.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- key: key_path
Update a volume's metadata for a specific key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. rest_method:: PUT /v3/{project_id}/volumes/{volume_id}/metadata/{key}
Updates metadata for a volume for a specific key.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- volume_id: volume_id_path
- key: key_update
- meta: meta
Request Example
---------------
.. literalinclude:: ./samples/volumes/volume-metadata-update-key-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- meta: meta
Response Example
----------------
.. literalinclude:: ./samples/volumes/volume-metadata-update-key-response.json
:language: javascript
Get volumes summary
~~~~~~~~~~~~~~~~~~~
.. rest_method:: GET /v3/{project_id}/volumes/summary
Displays a volumes summary with the total number of volumes and the total
size in GB. Available since API microversion 3.12.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 200
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- all_tenants: all-tenants
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- volume-summary: volume-summary
- total_size: total_size
- total_count: total_count_int
- metadata: summary_metadata
Response Example
----------------
.. literalinclude:: ./samples/volumes/volumes-list-summary-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/api-ref/source/v3/worker-cleanup.inc 0000664 0000000 0000000 00000002072 15131732575 0025175 0 ustar 00root root 0000000 0000000 .. -*- rst -*-
Workers (workers)
=================
Cleanup services
~~~~~~~~~~~~~~~~
.. rest_method:: POST v3/{project_id}/workers/cleanup
Request cleanup of services with optional filtering. This API is only
available with microversion 3.24 or later.
Response codes
--------------
.. rest_status_code:: success ../status.yaml
- 202
Request
-------
.. rest_parameters:: parameters.yaml
- project_id: project_id_path
- cluster_name: cluster_mutex
- service_id: service_id
- host: host_service
- binary: binary_required
- is-up: is_up
- disabled: disabled
- resource-id: resource_id
- resource-type: resource_type
Request Example
---------------
.. literalinclude:: ./samples/worker-cleanup-request.json
:language: javascript
Response Parameters
-------------------
.. rest_parameters:: parameters.yaml
- host: host_service
- binary: binary_required
- id: service_id
- cluster_name: cluster_mutex
Response Example
----------------
.. literalinclude:: ./samples/worker-cleanup-response.json
:language: javascript
cinder-27.0.0+git20260115.159.4fef6d9d4/bindep.txt 0000664 0000000 0000000 00000003556 15131732575 0020403 0 ustar 00root root 0000000 0000000 # This is a cross-platform list tracking distribution packages needed for
# install and tests;
# see https://docs.openstack.org/infra/bindep/ for additional information.
build-essential [platform:dpkg test]
gcc [platform:rpm test]
# gettext and graphviz are needed by doc builds only. For transition,
# have them in both doc and test.
# TODO(jaegerandi): Remove test once infra scripts are updated.
gettext [!platform:suse doc test]
gettext-runtime [platform:suse doc test]
graphviz [doc test]
libffi-dev [platform:dpkg]
libffi-devel [platform:redhat]
libffi48-devel [platform:suse]
virtual/libffi [platform:gentoo]
libssl-dev [platform:dpkg]
openssl-devel [platform:rpm !platform:suse]
libopenssl-devel [platform:suse !platform:rpm]
locales [platform:debian]
mariadb [platform:rpm]
mariadb-server [platform:redhat platform:debian]
mariadb-devel [platform:redhat]
libmariadb-dev-compat [platform:debian]
libmysqlclient-dev [platform:ubuntu]
libmysqlclient-devel [platform:suse]
mysql-client [platform:dpkg !platform:debian]
mysql-server [platform:dpkg !platform:debian]
postgresql
postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm]
postgresql-server [platform:rpm]
python3-devel [platform:rpm test]
libpq-dev [platform:dpkg]
thin-provisioning-tools [platform:debian]
libxml2-dev [platform:dpkg test]
libpcre3-dev [platform:dpkg doc]
libpcre-devel [platform:rpm doc]
libxslt-devel [platform:rpm test]
libxslt1-dev [platform:dpkg test]
cryptsetup [platform:rpm]
cryptsetup-bin [platform:dpkg]
# Cinder uses lsscsi via os-brick. Due to bindep usage in devstack and
# elsewhere, we add it here to make sure it is picked up and available in
# os-brick tests. Net result is the same that lsscsi will be installed for any
# cinder installation.
lsscsi
qemu-img [platform:redhat]
qemu-tools [platform:suse]
qemu-utils [platform:dpkg]
libcgroup-tools [platform:rpm]
cgroup-tools [platform:dpkg]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/ 0000775 0000000 0000000 00000000000 15131732575 0017634 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/__init__.py 0000664 0000000 0000000 00000001400 15131732575 0021740 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Root Cinder module."""
import os
# Ensure compatibility issues are covered with pythondsn
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
# Make sure eventlet is loaded
import eventlet # noqa
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/ 0000775 0000000 0000000 00000000000 15131732575 0020405 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/__init__.py 0000664 0000000 0000000 00000003420 15131732575 0022515 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import paste.urlmap
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def root_app_factory(loader, global_conf, **local_conf):
    """Paste factory for the API root application.

    To support upgrades from previous api-paste config files, strip any
    legacy mappings for the removed v1 and v2 APIs (logging a warning
    for each) before building the urlmap.
    """
    # Mapping of retired paths to the warning emitted when one is found.
    retired_versions = {
        '/v1': ('The v1 API has been removed and is no longer '
                'available. Client applications should be '
                'using v3, which is currently the only supported '
                'version of the Block Storage API.'),
        '/v2': ('The v2 API has been removed and is no longer available. '
                'Client applications must now use the v3 API only. '
                'The \'enable_v2_api\' option has been removed and is '
                'ignored in the cinder.conf file.'),
    }
    for path, warning in retired_versions.items():
        if path in local_conf:
            LOG.warning(warning)
            del local_conf[path]
    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/api_utils.py 0000664 0000000 0000000 00000023116 15131732575 0022753 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import typing
from typing import Any, Generator, Iterable, Optional, Union
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import identity
from keystoneauth1 import loading as ka_loading
from keystoneclient import client
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import webob
from webob import exc
from cinder import exception
from cinder.i18n import _
if typing.TYPE_CHECKING:
from cinder import context
CONF = cfg.CONF
CONF.import_group('keystone_authtoken',
'keystonemiddleware.auth_token.__init__')
LOG = logging.getLogger(__name__)
def _parse_is_public(is_public: Optional[str]) -> Optional[bool]:
    """Parse is_public into something usable.

    * True: List public volume types only
    * False: List private volume types only
    * None: List both public and private volume types
    """
    # Absent filter keeps the historical default: public types only.
    if is_public is None:
        return True
    # The literal string 'none' (any case) means "do not filter".
    if is_none_string(is_public):
        return None
    try:
        return strutils.bool_from_string(is_public, strict=True)
    except ValueError:
        msg = _('Invalid is_public filter [%s]') % is_public
        raise exc.HTTPBadRequest(explanation=msg)
def is_none_string(val: Any) -> bool:
    """Return True iff *val* is a string spelling 'none' (any case)."""
    return isinstance(val, str) and val.lower() == 'none'
def remove_invalid_filter_options(
        context: 'context.RequestContext',
        filters: dict,
        allowed_search_options: Iterable[str]) -> None:
    """Remove search options that are not valid for non-admin API/context.

    Admin contexts may filter on anything; for all other callers every
    option not in *allowed_search_options* is stripped from *filters*
    in place.
    """
    if context.is_admin:
        # Allow all options
        return
    unknown = [key for key in filters if key not in allowed_search_options]
    LOG.debug("Removing options '%s' from query.", ", ".join(unknown))
    for key in unknown:
        filters.pop(key)
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def add_visible_admin_metadata(volume) -> None:
    """Add user-visible admin metadata to regular metadata.

    Extracts the admin metadata keys that are to be made visible to
    non-administrators, and adds them to the regular metadata structure for the
    passed-in volume.
    """
    # Phase 1: gather whitelisted admin-metadata entries from whichever
    # representation the volume carries.
    visible_admin_meta = {}
    if volume.get('volume_admin_metadata'):
        if isinstance(volume['volume_admin_metadata'], dict):
            # Plain dict form: {key: value}.
            volume_admin_metadata = volume['volume_admin_metadata']
            for key in volume_admin_metadata:
                if key in _visible_admin_metadata_keys:
                    visible_admin_meta[key] = volume_admin_metadata[key]
        else:
            # DB-row list form: [{'key': ..., 'value': ...}, ...].
            for item in volume['volume_admin_metadata']:
                if item['key'] in _visible_admin_metadata_keys:
                    visible_admin_meta[item['key']] = item['value']
    # avoid circular ref when volume is a Volume instance
    elif (volume.get('admin_metadata') and
            isinstance(volume.get('admin_metadata'), dict)):
        for key in _visible_admin_metadata_keys:
            if key in volume['admin_metadata'].keys():
                visible_admin_meta[key] = volume['admin_metadata'][key]
    if not visible_admin_meta:
        # Nothing whitelisted to expose; leave the volume untouched.
        return
    # NOTE(zhiyan): update visible administration metadata to
    # volume metadata, administration metadata will rewrite existing key.
    if volume.get('volume_metadata'):
        orig_meta = list(volume.get('volume_metadata'))
        for item in orig_meta:
            if item['key'] in visible_admin_meta.keys():
                # Overwrite existing entries in place; pop so that only
                # leftover keys are appended below.
                item['value'] = visible_admin_meta.pop(item['key'])
        for key, value in visible_admin_meta.items():
            orig_meta.append({'key': key, 'value': value})
        volume['volume_metadata'] = orig_meta
    # avoid circular ref when vol is a Volume instance
    elif (volume.get('metadata') and
            isinstance(volume.get('metadata'), dict)):
        volume['metadata'].update(visible_admin_meta)
    else:
        volume['metadata'] = visible_admin_meta
def validate_integer(value: int, name: str,
                     min_value: Optional[int] = None,
                     max_value: Optional[int] = None) -> int:
    """Make sure that value is a valid integer, potentially within range.

    :param value: the value of the integer
    :param name: the name of the integer (used in error messages)
    :param min_value: the min value of the integer, if any
    :param max_value: the max value of the integer, if any
    :returns: the validated integer
    :raises webob.exc.HTTPBadRequest: when validation fails
    """
    try:
        return strutils.validate_integer(value, name, min_value, max_value)
    except ValueError as e:
        # Surface the validation failure as a 400 to the API caller.
        raise webob.exc.HTTPBadRequest(explanation=str(e))
def walk_class_hierarchy(clazz: type,
                         encountered: Optional[list[type]] = None) -> \
        Generator[type, None, None]:
    """Walk class hierarchy, yielding most derived classes first."""
    seen = encountered if encountered else []
    for child in clazz.__subclasses__():
        if child in seen:
            continue
        seen.append(child)
        # Recurse first so leaves come out before their ancestors.
        yield from walk_class_hierarchy(child, seen)
        yield child
def _keystone_client(context: 'context.RequestContext',
                     version: tuple[int, int] = (3, 0)) -> client.Client:
    """Creates and returns an instance of a generic keystone client.

    The token is re-scoped to whatever authorization target the context
    carries, checked in order: system, then domain, then project. An
    unscoped token falls back to the context's own auth plugin.

    :param context: The request context
    :param version: version of Keystone to request
    :return: keystoneclient.client.Client object
    """
    auth_url = CONF.keystone_authtoken.auth_url
    if context.system_scope is not None:
        auth_plugin = identity.Token(auth_url=auth_url,
                                     token=context.auth_token,
                                     system_scope=context.system_scope)
    elif context.domain_id is not None:
        auth_plugin = identity.Token(auth_url=auth_url,
                                     token=context.auth_token,
                                     domain_id=context.domain_id)
    elif context.project_id is not None:
        auth_plugin = identity.Token(auth_url=auth_url,
                                     token=context.auth_token,
                                     project_id=context.project_id)
    else:
        # We're dealing with an unscoped token from keystone that doesn't
        # carry any authoritative power outside of the user simply proving
        # they know their own password. This token isn't associated with
        # any authorization target (e.g., system, domain, or project).
        auth_plugin = context.get_auth_plugin()
    client_session = ka_loading.session.Session().load_from_options(
        auth=auth_plugin,
        insecure=CONF.keystone_authtoken.insecure,
        cacert=CONF.keystone_authtoken.cafile,
        key=CONF.keystone_authtoken.keyfile,
        cert=CONF.keystone_authtoken.certfile,
        split_loggers=CONF.service_user.split_loggers)
    return client.Client(auth_url=auth_url,
                         session=client_session, version=version)
class GenericProjectInfo(object):
    """Abstraction layer for Keystone V2 and V3 project objects.

    Provides a uniform attribute set (id, keystone_api_version,
    domain_id, name, description) regardless of keystone version.
    """

    def __init__(self,
                 project_id: str,
                 project_keystone_api_version: str,
                 domain_id: Optional[str] = None,
                 name: Optional[str] = None,
                 description: Optional[str] = None):
        # Required identity fields.
        self.id = project_id
        self.keystone_api_version = project_keystone_api_version
        # Optional descriptive fields; filled in later by get_project().
        self.domain_id = domain_id
        self.name = name
        self.description = description
def get_project(context: 'context.RequestContext',
                project_id: str) -> GenericProjectInfo:
    """Verify the project exists in keystone and wrap it.

    :raises: keystoneauth1 HTTP errors when the project lookup fails.
    """
    keystone = _keystone_client(context)
    info = GenericProjectInfo(project_id, keystone.version)
    project = keystone.projects.get(project_id)
    # Copy the descriptive fields onto the version-agnostic wrapper.
    info.domain_id = project.domain_id
    info.name = project.name
    info.description = project.description
    return info
def validate_project_and_authorize(context: 'context.RequestContext',
                                   project_id: str,
                                   policy_check: str,
                                   validate_only: bool = False) -> None:
    """Verify *project_id* exists in keystone and authorize the caller.

    :param context: the request context
    :param project_id: project to look up in keystone
    :param policy_check: name of the policy to authorize against
    :param validate_only: when False (the default) the policy target is
        reduced to ``{'project_id': ...}``; when True the full project
        object is used as the target
    :raises webob.exc.HTTPNotFound: if the project does not exist
    :raises webob.exc.HTTPForbidden: if the policy check fails
    """
    target_project: Union[GenericProjectInfo, dict]
    try:
        target_project = get_project(context, project_id)
        if not validate_only:
            target_project = {'project_id': target_project.id}
        context.authorize(policy_check, target=target_project)
    except ks_exc.http.NotFound:
        # NOTE: interpolate *outside* _() so the message template (not the
        # already-formatted string) is looked up in the translation catalog.
        explanation = _("Project with id %s not found.") % project_id
        raise exc.HTTPNotFound(explanation=explanation)
    except exception.NotAuthorized:
        explanation = _("You are not authorized to perform this "
                        "operation.")
        raise exc.HTTPForbidden(explanation=explanation)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/common.py 0000664 0000000 0000000 00000050530 15131732575 0022252 0 ustar 00root root 0000000 0000000 # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import enum
import json
import os
import re
import typing
from typing import Any, Iterable, Optional, Union
import urllib
from oslo_config import cfg
from oslo_log import log as logging
import webob
from cinder.api import api_utils
from cinder.api import microversions as mv
from cinder.common import constants
from cinder import exception
from cinder.i18n import _
if typing.TYPE_CHECKING:
from cinder import context
api_common_opts = [
cfg.IntOpt('osapi_max_limit',
default=1000,
help='The maximum number of items that a collection '
'resource returns in a single response'),
cfg.StrOpt('resource_query_filters_file',
default='/etc/cinder/resource_filters.json',
help="Json file indicating user visible filter "
"parameters for list queries."),
]
CONF = cfg.CONF
CONF.import_opt('public_endpoint', 'cinder.api.views.versions')
CONF.register_opts(api_common_opts)
LOG = logging.getLogger(__name__)
_FILTERS_COLLECTION = None
ATTRIBUTE_CONVERTERS = {'name~': 'display_name~',
'description~': 'display_description~',
'consumes_quota': 'use_quota'}
METADATA_TYPES = enum.Enum('METADATA_TYPES', 'user image')
def get_pagination_params(params: dict,
                          max_limit: Optional[int] = None) -> tuple:
    """Return marker, limit, offset tuple from request.

    :param params: `wsgi.Request`'s GET dictionary, possibly containing
                   'marker', 'limit', and 'offset' variables. 'marker' is
                   the id of the last element the client has seen, 'limit'
                   is the maximum number of items to return and 'offset'
                   is the number of items to skip from the marker or from
                   the first element. If 'limit' is not specified, or
                   > max_limit, we default to max_limit. Negative values
                   for either offset or limit will cause
                   exc.HTTPBadRequest() exceptions to be raised. If no
                   offset is present we'll default to 0 and if no marker
                   is present we'll default to None.
    :max_limit: Max value 'limit' return value can take
    :returns: Tuple (marker, limit, offset)
    """
    max_limit = max_limit or CONF.osapi_max_limit
    # Each helper pops its own key from the shared params dict.
    return (_get_marker_param(params),
            _get_limit_param(params, max_limit),
            _get_offset_param(params))
def _get_limit_param(params: dict, max_limit: Optional[int] = None) -> int:
    """Pop and validate the 'limit' entry of *params*.

    Defaults to max_limit when absent, and the returned value is capped
    at max_limit. Raises HTTPBadRequest for non-integer or negative
    values.
    """
    max_limit = max_limit or CONF.osapi_max_limit
    raw = params.pop('limit', max_limit)
    try:
        limit = int(raw)
    except ValueError:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be an integer'))
    if limit < 0:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be positive'))
    return min(limit, max_limit)
def _get_marker_param(params: dict[str, Any]) -> Optional[str]:
"""Extract marker id from request's dictionary (defaults to None)."""
return params.pop('marker', None)
def _get_offset_param(params: dict[str, Any]) -> int:
    """Pop 'offset' from *params* (default 0) and validate its range.

    Delegates to api_utils.validate_integer, which raises when the value
    is not an integer in [0, DB_MAX_INT].
    """
    raw_offset = params.pop('offset', 0)
    return api_utils.validate_integer(
        raw_offset, 'offset', 0, constants.DB_MAX_INT)
def limited(items: list,
            request: webob.Request,
            max_limit: Optional[int] = None) -> list:
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and
                    'limit' GET variables. 'offset' is where to start in
                    the list, and 'limit' is the maximum number of items
                    to return. If 'limit' is not specified, 0, or
                    > max_limit, we default to max_limit. Negative values
                    for either offset or limit will cause
                    exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
    """
    max_limit = max_limit or CONF.osapi_max_limit
    # The marker is irrelevant for plain slicing; only limit/offset matter.
    _marker, limit, offset = get_pagination_params(request.GET.copy(),
                                                   max_limit)
    span = limit or max_limit
    return items[offset:offset + span]
def get_sort_params(params: dict,
                    default_key: str = 'created_at',
                    default_dir: str = 'desc') -> tuple[list[str], list[str]]:
    """Retrieves sort keys/directions parameters.

    Builds parallel lists of sort keys and directions from either the
    'sort' parameter (a comma-separated list of keys, each optionally
    suffixed with ':<direction>') or the legacy 'sort_key'/'sort_dir'
    pair.  The legacy parameters are deprecated since kilo and may not
    be combined with 'sort'.  All consumed parameters are removed from
    *params*.

    :param params: webob.multidict of request parameters (from
                   cinder.api.openstack.wsgi.Request.params)
    :param default_key: default sort key value, added to the list if no
                        sort keys are supplied
    :param default_dir: default sort dir value, added to the list if the
                        corresponding key does not have a direction
                        specified
    :returns: list of sort keys, list of sort dirs
    :raise webob.exc.HTTPBadRequest: If both 'sort' and either 'sort_key'
                                     or 'sort_dir' are supplied parameters
    """
    has_new_style = 'sort' in params
    if has_new_style and ('sort_key' in params or 'sort_dir' in params):
        msg = _("The 'sort_key' and 'sort_dir' parameters are deprecated and "
                "cannot be used with the 'sort' parameter.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    sort_keys: list = []
    sort_dirs: list = []
    if has_new_style:
        for entry in params.pop('sort').strip().split(','):
            key, _sep, direction = entry.partition(':')
            sort_keys.append(key.strip())
            # An omitted direction ('' from partition) gets the default.
            sort_dirs.append((direction or default_dir).strip())
    else:
        sort_keys.append(params.pop('sort_key', default_key).strip())
        sort_dirs.append(params.pop('sort_dir', default_dir).strip())
    return sort_keys, sort_dirs
def get_request_url(request: webob.Request) -> str:
    """Return the request's application URL, honouring X-Forwarded-Host.

    When the request passed through one or more proxies, the last entry
    of the X-Forwarded-Host header replaces the netloc of the URL and a
    trailing slash is stripped.
    """
    url = request.application_url
    forwarded = request.headers.get('X-Forwarded-Host')
    if forwarded:
        scheme, _netloc, path, query, fragment = urllib.parse.urlsplit(url)
        # The header may hold a comma-separated chain; the last hop wins.
        netloc = re.split(r',\s?', forwarded)[-1]
        url = urllib.parse.urlunsplit(
            (scheme, netloc, path, query, fragment)).rstrip('/')
    return url
def remove_version_from_href(href: str) -> str:
    """Strip the first API version segment from *href*.

    Given: 'http://cinder.example.com/v1.1/123'
    Returns: 'http://cinder.example.com/123'
    Given: 'http://cinder.example.com/v1.1'
    Returns: 'http://cinder.example.com'
    Given: 'http://cinder.example.com/volume/drivers/v1.1/flashsystem'
    Returns: 'http://cinder.example.com/volume/drivers/flashsystem'

    :raises ValueError: if *href* contains no version segment.
    """
    split_result = urllib.parse.urlsplit(href)
    segments = split_result.path.split('/')
    # NOTE: this should match vX.X or vX
    version_re = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    for idx, segment in enumerate(segments):
        if version_re.match(segment):
            del segments[idx]
            break
    new_path = '/'.join(segments)
    if new_path == split_result.path:
        msg = 'href %s does not contain version' % href
        LOG.debug(msg)
        raise ValueError(msg)
    rebuilt = list(split_result)
    rebuilt[2] = new_path
    return urllib.parse.urlunsplit(rebuilt)
class ViewBuilder(object):
    """Model API responses as dictionaries."""

    # Subclasses set this to the collection they render (e.g. 'volumes').
    _collection_name: Optional[str] = None

    def _get_project_id_in_url(self, request: webob.Request) -> str:
        """Return the request's project id when the URL carries it, else ''."""
        ctx_project = request.environ["cinder.context"].project_id
        # project_ids are not mandatory within v3 URLs, but links need
        # to include them if the request does.
        if ctx_project and "/v3/%s" % ctx_project in request.url:
            return ctx_project
        return ''

    def _get_links(self,
                   request: webob.Request,
                   identifier: str) -> list[dict[str, str]]:
        """Build the standard self/bookmark link pair for a resource."""
        self_href = self._get_href_link(request, identifier)
        bookmark_href = self._get_bookmark_link(request, identifier)
        return [{"rel": "self", "href": self_href},
                {"rel": "bookmark", "href": bookmark_href}]

    def _get_next_link(self,
                       request: webob.Request,
                       identifier: str,
                       collection_name: str) -> str:
        """Return href string with proper limit and marker params."""
        query = request.params.copy()
        query["marker"] = identifier
        prefix = self._update_link_prefix(get_request_url(request),
                                          CONF.public_endpoint)
        url = os.path.join(prefix,
                           self._get_project_id_in_url(request),
                           collection_name)
        return "%s?%s" % (url, urllib.parse.urlencode(query))

    def _get_href_link(self, request: webob.Request, identifier: str) -> str:
        """Return an href string pointing to this object."""
        prefix = self._update_link_prefix(get_request_url(request),
                                          CONF.public_endpoint)
        assert self._collection_name is not None
        return os.path.join(prefix,
                            self._get_project_id_in_url(request),
                            self._collection_name,
                            str(identifier))

    def _get_bookmark_link(self,
                           request: webob.Request,
                           identifier: str) -> str:
        """Create a URL that refers to a specific resource."""
        unversioned = remove_version_from_href(get_request_url(request))
        unversioned = self._update_link_prefix(unversioned,
                                               CONF.public_endpoint)
        assert self._collection_name is not None
        return os.path.join(unversioned,
                            self._get_project_id_in_url(request),
                            self._collection_name,
                            str(identifier))

    def _get_collection_links(self,
                              request: webob.Request,
                              items: list,
                              collection_name: str,
                              item_count: Optional[int] = None,
                              id_key: str = "uuid") -> list[dict]:
        """Retrieve 'next' link, if applicable.

        A 'next' link is emitted when we returned as many items as the
        effective limit allowed, as long as at least one item was
        returned.  That is:

        1) 'limit' param is specified and equal to the number of items.
        2) 'limit' param is NOT specified and the number of items is
           equal to CONF.osapi_max_limit.

        :param request: API request
        :param items: List of collection items
        :param collection_name: Name of collection, used to generate the
                                next link for a pagination query
        :param item_count: Length of the list of the original collection
                           items
        :param id_key: Attribute key used to retrieve the unique ID, used
                       to generate the next link marker for a pagination
                       query
        :returns: links
        """
        item_count = item_count or len(items)
        limit = _get_limit_param(request.GET.copy())
        if not items or limit > item_count:
            return []
        return self._generate_next_link(items, id_key, request,
                                        collection_name)

    def _generate_next_link(self,
                            items: list,
                            id_key: str,
                            request: webob.Request,
                            collection_name: str) -> list[dict]:
        """Build the 'next' pagination link keyed on the last item's id."""
        last_item = items[-1]
        marker = last_item[id_key] if id_key in last_item else last_item["id"]
        href = self._get_next_link(request, marker, collection_name)
        return [{"rel": "next", "href": href}]

    def _update_link_prefix(self, orig_url: str, prefix: Optional[str]) -> str:
        """Graft *prefix*'s scheme, netloc and path onto *orig_url*."""
        if not prefix:
            return orig_url
        base_parts = list(urllib.parse.urlsplit(orig_url))
        prefix_parts = list(urllib.parse.urlsplit(prefix))
        base_parts[0:2] = prefix_parts[0:2]
        base_parts[2] = prefix_parts[2] + base_parts[2]
        return urllib.parse.urlunsplit(base_parts).rstrip('/')
def get_cluster_host(req: webob.Request,
                     params: dict,
                     cluster_version=None) -> tuple[Optional[str],
                                                    Optional[str]]:
    """Get cluster and host from the parameters.

    This method checks the presence of cluster and host parameters and
    returns them depending on the cluster_version.

    If cluster_version is False we will never return the cluster_name and
    we will require the presence of the host parameter.

    If cluster_version is None we will always check for the presence of
    the cluster parameter, and if cluster_version is a string with a
    version we will only check for the presence of the parameter if the
    version of the request is not less than it.  In both cases we will
    require one and only one parameter, host or cluster.
    """
    want_cluster = (cluster_version is not False and
                    req.api_version_request.matches(cluster_version))
    if want_cluster:
        cluster_name = params.get('cluster')
        msg = _('One and only one of cluster and host must be set.')
    else:
        cluster_name = None
        msg = _('Host field is missing.')
    host = params.get('host')
    # Exactly one of the two must be provided.
    if bool(cluster_name) == bool(host):
        raise exception.InvalidInput(reason=msg)
    return cluster_name, host
def _initialize_filters() -> None:
    """Load the resource filter configuration into the module-level cache.

    No-op when the cache is already populated.  When the configured file
    does not exist an error is logged and the cache is left untouched.
    """
    global _FILTERS_COLLECTION
    if _FILTERS_COLLECTION:
        return
    filters_path = CONF.resource_query_filters_file
    if not os.path.exists(filters_path):
        LOG.error(
            "resource query filters file does not exist: %s", filters_path)
        return
    with open(filters_path, 'r') as filters_file:
        _FILTERS_COLLECTION = json.load(filters_file)
def get_enabled_resource_filters(
        resource: Optional[str] = None) -> dict[str, Any]:
    """Get list of configured/allowed filters for the specified resource.

    This method checks resource_query_filters_file and returns a
    dictionary which contains the specified resource and its allowed
    filters:

    .. code-block:: json

            {
                "resource": ["filter1", "filter2", "filter3"]
            }

    When *resource* is not specified the whole configuration is returned;
    when it is unknown (or loading fails) an empty dict is returned.
    """
    try:
        _initialize_filters()
        assert _FILTERS_COLLECTION is not None
        if resource:
            return {resource: _FILTERS_COLLECTION[resource]}
        return _FILTERS_COLLECTION
    except Exception:
        LOG.debug("Failed to collect resource %s's filters.", resource)
        return {}
def get_time_comparison_operators() -> tuple[str, ...]:
    """Return the comparison operators allowed in time-based filters."""
    allowed_operators = ("gt", "gte", "eq", "neq", "lt", "lte")
    return allowed_operators
def convert_filter_attributes(filters, resource):
    """Rename user-facing filter keys to their internal column names.

    Only applies to volume, backup and snapshot resources; *filters* is
    modified in place using the ATTRIBUTE_CONVERTERS mapping.
    """
    if resource not in ('volume', 'backup', 'snapshot'):
        return
    for key in list(filters.keys()):
        if key in ATTRIBUTE_CONVERTERS:
            filters[ATTRIBUTE_CONVERTERS[key]] = filters.pop(key)
def reject_invalid_filters(context: 'context.RequestContext',
                           filters,
                           resource: str,
                           enable_like_filter: bool = False):
    """Raise HTTPBadRequest when *filters* holds unsupported query keys.

    Admins may use any filter (except on the admin-only 'pool' resource,
    which is still checked).  Non-admin requests are validated against
    the configured per-resource filter list; an unconfigured
    'all_tenants' key is silently dropped instead of rejected.
    """
    for key in list(filters):
        try:
            # Only ASCII characters can be valid filter keys,
            # in PY2/3, the key can be either unicode or string.
            if isinstance(key, str):
                key.encode('ascii')
            else:
                key.decode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            raise webob.exc.HTTPBadRequest(
                explanation=_('Filter keys can only contain '
                              'ASCII characters.'))

    if context.is_admin and resource not in ['pool']:
        # Allow all options except resource is pool
        # pool API is only available for admin
        return

    # Check the configured filters against those passed in resource
    configured_filters: Iterable
    enabled = get_enabled_resource_filters(resource)
    configured_filters = enabled[resource] if enabled else []

    invalid_filters = []
    for key in list(filters):
        if enable_like_filter:
            # If 'key~' is configured, both 'key' and 'key~' are valid.
            if (key not in configured_filters and
                    "%s~" % key not in configured_filters):
                invalid_filters.append(key)
        elif key not in configured_filters:
            invalid_filters.append(key)

    if not invalid_filters:
        return
    if 'all_tenants' in invalid_filters:
        # NOTE: this is a special case: the cinderclient always adds
        # 'all_tenants', so we don't want to hold that against a non-admin
        # user and we silently ignore it. See Bug #1917574.
        invalid_filters.remove('all_tenants')
        filters.pop('all_tenants')
    if len(invalid_filters) == 0:
        return
    raise webob.exc.HTTPBadRequest(
        explanation=_('Invalid filters %s are found in query '
                      'options.') % ','.join(invalid_filters))
def process_general_filtering(resource):
    """Decorator factory that routes filter validation by microversion.

    Requests at or above mv.RESOURCE_FILTER take the general validation
    path (reject_invalid_filters + convert_filter_attributes, with
    like-filter support from mv.LIKE_FILTER onward); older requests fall
    through to the wrapped resource-specific handler.
    """
    def wrapper(process_non_general_filtering):
        def _decorator(*args, **kwargs):
            req_version = kwargs.get('req_version')
            filters = kwargs.get('filters')
            ctxt = typing.cast('context.RequestContext',
                               kwargs.get('context'))
            assert req_version is not None
            if not req_version.matches(mv.RESOURCE_FILTER):
                process_non_general_filtering(*args, **kwargs)
                return
            support_like = req_version.matches(mv.LIKE_FILTER)
            reject_invalid_filters(ctxt, filters, resource, support_like)
            convert_filter_attributes(filters, resource)
        return _decorator
    return wrapper
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/ 0000775 0000000 0000000 00000000000 15131732575 0022045 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/__init__.py 0000664 0000000 0000000 00000002305 15131732575 0024156 0 ustar 00root root 0000000 0000000 # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contrib contains extensions that are shipped with cinder.
It can't be called 'extensions' because that causes namespacing problems.
"""
from oslo_config import cfg
from oslo_log import log as logging
from cinder.api import extensions
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def standard_extensions(ext_mgr):
    """Load every extension shipped under this package into *ext_mgr*."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__)
def select_extensions(ext_mgr):
    """Load only the extensions listed in CONF.osapi_volume_ext_list."""
    extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__,
                                        CONF.osapi_volume_ext_list)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/admin_actions.py 0000664 0000000 0000000 00000036171 15131732575 0025237 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import strutils
import webob
from cinder.api import common
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import admin_actions as schema
from cinder.api import validation
from cinder import backup
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import volume
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
class VolumeAdminController(wsgi.Controller):
    """AdminController for Volumes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.volume_api = volume.API()

    def authorize(self, context, action_name, target_obj=None):
        """Enforce the volume admin-actions policy for *action_name*."""
        context.authorize(
            'volume_extension:volume_admin_actions:%(action)s' %
            {'action': action_name}, target_obj=target_obj
        )

    def _notify_reset_status(self, context, id, message):
        """Emit a usage notification (*message*) for the given volume."""
        volume = objects.Volume.get_by_id(context, id)
        volume_utils.notify_about_volume_usage(context, volume,
                                               message)

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-reset_status')
    @validation.schema(schema.reset_status_volume)
    def _reset_status(self, req, id, body):
        """Reset status on the volume."""

        def _clean_volume_attachment(context, id):
            # Mark every attachment of this volume as detached and drop
            # the 'attached_mode' admin metadata that goes with them.
            attachments = (
                db.volume_attachment_get_all_by_volume_id(context, id))
            for attachment in attachments:
                db.volume_detached(context.elevated(), id, attachment.id)
            db.volume_admin_metadata_delete(context.elevated(), id,
                                            'attached_mode')

        update = {}
        body = body['os-reset_status']
        status = body.get('status', None)
        attach_status = body.get('attach_status', None)
        migration_status = body.get('migration_status', None)
        if status:
            update['status'] = status.lower()
        if attach_status:
            update['attach_status'] = attach_status.lower()
        if migration_status:
            update['migration_status'] = migration_status.lower()
            # 'none' is the user-facing way to clear the migration status.
            if update['migration_status'] == 'none':
                update['migration_status'] = None
        context = req.environ['cinder.context']
        # any exceptions raised will be handled at the wsgi level
        volume = objects.Volume.get_by_id(context, id)
        self.authorize(context, 'reset_status', target_obj=volume)
        # at this point, we still don't know if we're going to
        # reset the volume's state. Need to check what the caller
        # is requesting first.
        # NOTE: the tuple below previously read
        # ('deleting', 'error_deleting' 'detaching') -- a missing comma
        # made Python concatenate the last two strings, so resets to
        # 'error_deleting' or 'detaching' were silently accepted.
        if update.get('status') in ('deleting', 'error_deleting',
                                    'detaching'):
            # Translate the format string first, then interpolate, so the
            # message catalog lookup uses the untranslated template.
            msg = _("Cannot reset-state to %s") % update.get('status')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if update.get('status') == 'in-use':
            attachments = (
                db.volume_attachment_get_all_by_volume_id(context, id))
            if not attachments:
                msg = _("Cannot reset-state to in-use "
                        "because volume does not have any attachments.")
                raise webob.exc.HTTPBadRequest(explanation=msg)
        msg = "Updating volume '%(id)s' with '%(update)r'"
        LOG.debug(msg, {'id': id, 'update': update})
        self._notify_reset_status(context, id, 'reset_status.start')
        db.volume_update(context, id, update)
        # Remove the cleanup worker from the DB when we change a resource
        # status since it renders useless the entry.
        res = db.worker_destroy(context, resource_type='VOLUME',
                                resource_id=id)
        if res:
            LOG.debug('Worker entry for volume with id %s has been deleted.',
                      id)
        if update.get('attach_status') == 'detached':
            _clean_volume_attachment(context, id)
        self._notify_reset_status(context, id, 'reset_status.end')

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-force_detach')
    @validation.schema(schema.force_detach)
    def _force_detach(self, req, id, body):
        """Roll-back a bad detach after the volume been disconnected."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, id)
        self.authorize(context, 'force_detach', target_obj=volume)
        connector = body['os-force_detach'].get('connector', None)
        try:
            self.volume_api.terminate_connection(context, volume, connector)
        except exception.VolumeBackendAPIException:
            msg = _("Unable to terminate volume connection from backend.")
            raise webob.exc.HTTPInternalServerError(explanation=msg)
        attachment_id = body['os-force_detach'].get('attachment_id', None)
        try:
            self.volume_api.detach(context, volume, attachment_id)
        except messaging.RemoteError as error:
            if error.exc_type in ['VolumeAttachmentNotFound',
                                  'InvalidVolume']:
                msg = _("Error force detaching volume - %(err_type)s: "
                        "%(err_msg)s") % {'err_type': error.exc_type,
                                          'err_msg': error.value}
                raise webob.exc.HTTPBadRequest(explanation=msg)
            else:
                # There are also few cases where force-detach call could fail
                # due to db or volume driver errors. These errors shouldn't
                # be exposed to the user and in such cases it should raise
                # 500 error.
                raise

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-migrate_volume')
    @validation.schema(schema.migrate_volume, mv.BASE_VERSION,
                       mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER))
    @validation.schema(schema.migrate_volume_v316,
                       mv.VOLUME_MIGRATE_CLUSTER)
    def _migrate_volume(self, req, id, body):
        """Migrate a volume to the specified host."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, id)
        self.authorize(context, 'migrate_volume', target_obj=volume)
        params = body['os-migrate_volume']
        cluster_name, host = common.get_cluster_host(req, params,
                                                     mv.VOLUME_MIGRATE_CLUSTER)
        force_host_copy = strutils.bool_from_string(params.get(
            'force_host_copy', False), strict=True)
        lock_volume = strutils.bool_from_string(params.get(
            'lock_volume', False), strict=True)
        self.volume_api.migrate_volume(context, volume, host, cluster_name,
                                       force_host_copy, lock_volume)

    @wsgi.action('os-migrate_volume_completion')
    @validation.schema(schema.migrate_volume_completion)
    def _migrate_volume_completion(self, req, id, body):
        """Complete an in-progress migration."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, id)
        self.authorize(context, 'migrate_volume_completion', target_obj=volume)
        params = body['os-migrate_volume_completion']
        new_volume_id = params['new_volume']
        # Not found exception will be handled at the wsgi level
        new_volume = self.volume_api.get(context, new_volume_id)
        error = params.get('error', False)
        ret = self.volume_api.migrate_volume_completion(context, volume,
                                                        new_volume, error)
        return {'save_volume_id': ret}

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-extend_volume_completion')
    @validation.schema(schema.extend_volume_completion)
    def _extend_volume_completion(self, req, id, body):
        """Complete an in-progress extend operation."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, id)
        self.authorize(context, 'extend_volume_completion', target_obj=volume)
        params = body['os-extend_volume_completion']
        error = params.get('error', False)
        self.volume_api.extend_volume_completion(context, volume, error)

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-force_delete')
    @validation.schema(schema.force_delete_volume)
    def _force_delete(self, req, id, body):
        """Delete a volume, bypassing the check that it must be available."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        resource = self.volume_api.get(context, id)
        self.authorize(context, 'force_delete', target_obj=resource)
        self.volume_api.delete(context, resource, force=True)
class SnapshotAdminController(wsgi.Controller):
    """AdminController for Snapshots."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.volume_api = volume.API()

    def authorize(self, context, action_name, target_obj=None):
        """Enforce the snapshot admin-actions policy for *action_name*."""
        context.authorize(
            'volume_extension:snapshot_admin_actions:%(action)s' %
            {'action': action_name}, target_obj=target_obj
        )

    def _notify_reset_status(self, context, id, message):
        """Emit a usage notification (*message*) for the given snapshot."""
        snapshot = objects.Snapshot.get_by_id(context, id)
        volume_utils.notify_about_snapshot_usage(context, snapshot,
                                                 message)

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-reset_status')
    @validation.schema(schema.reset_status_snapshot)
    def _reset_status(self, req, id, body):
        """Reset status on the snapshot."""
        # NOTE: a volume-attachment cleanup helper copied from the volume
        # controller used to live here, guarded by
        # update.get('attach_status') -- but 'update' below only ever
        # contains 'status', so that branch was unreachable dead code and
        # has been removed.
        context = req.environ['cinder.context']
        status = body['os-reset_status']['status']
        update = {'status': status.lower()}
        msg = "Updating snapshot '%(id)s' with '%(update)r'"
        LOG.debug(msg, {'id': id, 'update': update})
        self._notify_reset_status(context, id, 'reset_status.start')
        # Not found exception will be handled at the wsgi level
        snapshot = objects.Snapshot.get_by_id(context, id)
        self.authorize(context, 'reset_status', target_obj=snapshot)
        snapshot.update(update)
        snapshot.save()
        # Remove the cleanup worker from the DB when we change a resource
        # status since it renders useless the entry.
        res = db.worker_destroy(context, resource_type='SNAPSHOT',
                                resource_id=id)
        if res:
            LOG.debug('Worker entry for snapshot with id %s has been deleted.',
                      id)
        self._notify_reset_status(context, id, 'reset_status.end')

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-force_delete')
    @validation.schema(schema.force_delete_snapshot)
    def _force_delete(self, req, id, body):
        """Delete a snapshot, bypassing the check that it must be available."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        resource = self.volume_api.get_snapshot(context, id)
        self.authorize(context, 'force_delete', target_obj=resource)
        self.volume_api.delete_snapshot(context, resource, force=True)
class BackupAdminController(wsgi.Controller):
    """AdminController for Backups."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.backup_api = backup.API()

    def authorize(self, context, action_name, target_obj=None):
        """Enforce the backup admin-actions policy for *action_name*."""
        policy_name = ('volume_extension:backup_admin_actions:%(action)s' %
                       {'action': action_name})
        context.authorize(policy_name, target_obj=target_obj)

    def _notify_reset_status(self, context, id, message):
        """Emit a usage notification (*message*) for the given backup."""
        # Local name avoids shadowing the imported 'backup' module.
        backup_obj = objects.Backup.get_by_id(context, id)
        volume_utils.notify_about_backup_usage(context, backup_obj, message)

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-reset_status')
    @validation.schema(schema.reset_status_backup)
    def _reset_status(self, req, id, body):
        """Reset status on the backup."""
        ctxt = req.environ['cinder.context']
        new_status = body['os-reset_status']['status'].lower()
        LOG.debug("Updating backup '%(id)s' with '%(update)r'",
                  {'id': id, 'update': {'status': new_status}})
        self._notify_reset_status(ctxt, id, 'reset_status.start')
        # Not found exception will be handled at the wsgi level
        self.backup_api.reset_status(context=ctxt, backup_id=id,
                                     status=new_status)
        # the backup API takes care of the reset_status.end notification

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-force_delete')
    @validation.schema(schema.force_delete_backup)
    def _force_delete(self, req, id, body):
        """Delete a backup, bypassing the check that it must be available."""
        ctxt = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        target = self.backup_api.get(ctxt, id)
        self.authorize(ctxt, 'force_delete', target_obj=target)
        self.backup_api.delete(ctxt, target, force=True)
class Admin_actions(extensions.ExtensionDescriptor):
    """Enable admin actions."""

    name = "AdminActions"
    alias = "os-admin-actions"
    updated = "2012-08-25T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach each admin controller to its parent collection."""
        controller_map = (
            ('volumes', VolumeAdminController),
            ('snapshots', SnapshotAdminController),
            ('backups', BackupAdminController),
        )
        return [extensions.ControllerExtension(self, collection, ctrl_cls())
                for collection, ctrl_cls in controller_map]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/availability_zones.py 0000664 0000000 0000000 00000003224 15131732575 0026310 0 ustar 00root root 0000000 0000000 # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
import cinder.api.views.availability_zones
import cinder.exception
import cinder.volume.api
class AvailabilityZoneController(wsgi.Controller):
    """Expose the list of known availability zones."""

    _view_builder_class = cinder.api.views.availability_zones.ViewBuilder

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.volume_api = cinder.volume.api.API()

    def index(self, req):
        """Describe all known availability zones."""
        zones = self.volume_api.list_availability_zones()
        return self._view_builder.list(req, zones)
class Availability_zones(extensions.ExtensionDescriptor):
    """Describe Availability Zones."""

    name = 'AvailabilityZones'
    alias = 'os-availability-zone'
    updated = '2013-06-27T00:00:00+00:00'

    def get_resources(self):
        """Register the availability-zone resource extension."""
        return [extensions.ResourceExtension(Availability_zones.alias,
                                             AvailabilityZoneController())]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/backups.py 0000664 0000000 0000000 00000027215 15131732575 0024056 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The backups api."""
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import strutils
from webob import exc
from cinder.api import api_utils
from cinder.api import common
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import backups as schema
from cinder.api import validation
from cinder.api.views import backups as backup_views
from cinder import backup as backupAPI
from cinder import exception
from cinder import utils
from cinder import volume as volumeAPI
LOG = logging.getLogger(__name__)
class BackupsController(wsgi.Controller):
    """The Backups API controller for the OpenStack API."""
    # View builder used to render backup objects into API responses.
    _view_builder_class = backup_views.ViewBuilder
    def __init__(self):
        # backup_api drives all backup operations; volume_api is only needed
        # for the resource-count query in _get_backups.
        self.backup_api = backupAPI.API()
        self.volume_api = volumeAPI.API()
        super(BackupsController, self).__init__()
    def show(self, req, id):
        """Return data about the given backup.

        :param req: the WSGI request
        :param id: UUID of the backup
        :returns: detail view of the backup
        """
        LOG.debug('Show backup with id: %s.', id)
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        backup = self.backup_api.get(context, backup_id=id)
        req.cache_db_backup(backup)
        return self._view_builder.detail(req, backup)
    @wsgi.response(HTTPStatus.ACCEPTED)
    def delete(self, req, id):
        """Delete a backup.

        Responds 202 on success; an InvalidBackup error (e.g. backup in a
        state that cannot be deleted) maps to HTTP 400.
        """
        context = req.environ['cinder.context']
        LOG.info('Delete backup with id: %s.', id)
        try:
            backup = self.backup_api.get(context, id)
            self.backup_api.delete(context, backup)
        # Not found exception will be handled at the wsgi level
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
    def index(self, req):
        """Returns a summary list of backups."""
        return self._get_backups(req, is_detail=False)
    def detail(self, req):
        """Returns a detailed list of backups."""
        return self._get_backups(req, is_detail=True)
    @staticmethod
    def _get_backup_filter_options():
        """Return volume search options allowed by non-admin."""
        return ('name', 'status', 'volume_id')
    @common.process_general_filtering('backup')
    def _process_backup_filtering(self, context=None, filters=None,
                                  req_version=None):
        # Strip any filter keys the caller is not allowed to use; admins
        # typically keep everything, non-admins are limited to the
        # options above (enforced by the helper).
        api_utils.remove_invalid_filter_options(
            context,
            filters,
            self._get_backup_filter_options())
    def _convert_sort_name(self, req_version, sort_keys):
        """Convert sort key "name" to "display_name". """
        # NOTE(review): intentionally a no-op here; presumably overridden
        # in a microversion-aware subclass -- confirm before relying on it.
        pass
    def _get_backups(self, req, is_detail):
        """Returns a list of backups, transformed through view builder."""
        context = req.environ['cinder.context']
        filters = req.params.copy()
        req_version = req.api_version_request
        # Pagination/sort helpers pop their keys out of ``filters``.
        marker, limit, offset = common.get_pagination_params(filters)
        sort_keys, sort_dirs = common.get_sort_params(filters)
        show_count = False
        # 'with_count' is only honored from the microversion that
        # introduced count support.
        if req_version.matches(
                mv.SUPPORT_COUNT_INFO) and 'with_count' in filters:
            show_count = utils.get_bool_param('with_count', filters)
            filters.pop('with_count')
        self._convert_sort_name(req_version, sort_keys)
        self._process_backup_filtering(context=context, filters=filters,
                                       req_version=req_version)
        # The public API filter 'name' maps to the DB column 'display_name'.
        if 'name' in filters:
            filters['display_name'] = filters.pop('name')
        backups = self.backup_api.get_all(context, search_opts=filters.copy(),
                                          marker=marker,
                                          limit=limit,
                                          offset=offset,
                                          sort_keys=sort_keys,
                                          sort_dirs=sort_dirs,
                                          )
        total_count = None
        if show_count:
            # Count query goes through the volume API helper, which counts
            # arbitrary resource types ('backup' here).
            total_count = self.volume_api.calculate_resource_count(
                context, 'backup', filters)
        req.cache_db_backups(backups.objects)
        if is_detail:
            backups = self._view_builder.detail_list(req, backups.objects,
                                                     total_count)
        else:
            backups = self._view_builder.summary_list(req, backups.objects,
                                                      total_count)
        return backups
    # TODO(frankm): Add some checks here including
    # - whether requested volume_id exists so we can return some errors
    # immediately
    # - maybe also do validation of swift container name
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create, mv.BASE_VERSION,
                       mv.get_prior_version(mv.BACKUP_METADATA))
    @validation.schema(schema.create_backup_v343, mv.BACKUP_METADATA,
                       mv.get_prior_version(mv.BACKUP_AZ))
    @validation.schema(schema.create_backup_v351, mv.BACKUP_AZ)
    def create(self, req, body):
        """Create a new backup.

        The request schema (and so the accepted fields) depends on the
        requested microversion: metadata and availability_zone are only
        read from the body when the corresponding microversion matches.
        """
        LOG.debug('Creating new backup %s', body)
        context = req.environ['cinder.context']
        req_version = req.api_version_request
        backup = body['backup']
        container = backup.get('container', None)
        volume_id = backup['volume_id']
        self.clean_name_and_description(backup)
        name = backup.get('name', None)
        description = backup.get('description', None)
        # Schema validation has already constrained these to boolean-like
        # values; strict=True still guards against anything unexpected.
        incremental = strutils.bool_from_string(backup.get(
            'incremental', False), strict=True)
        force = strutils.bool_from_string(backup.get(
            'force', False), strict=True)
        snapshot_id = backup.get('snapshot_id', None)
        metadata = backup.get('metadata', None) if req_version.matches(
            mv.BACKUP_METADATA) else None
        if req_version.matches(mv.BACKUP_AZ):
            availability_zone = backup.get('availability_zone', None)
        else:
            availability_zone = None
        az_text = ' in az %s' % availability_zone if availability_zone else ''
        LOG.info("Creating backup of volume %(volume_id)s in container"
                 " %(container)s%(az)s",
                 {'volume_id': volume_id, 'container': container,
                  'az': az_text},
                 context=context)
        try:
            new_backup = self.backup_api.create(context, name, description,
                                                volume_id, container,
                                                incremental, availability_zone,
                                                force, snapshot_id, metadata)
        except (exception.InvalidVolume,
                exception.InvalidSnapshot,
                exception.InvalidVolumeMetadata,
                exception.InvalidVolumeMetadataSize) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        # Other not found exceptions will be handled at the wsgi level
        except exception.ServiceNotFound as error:
            raise exc.HTTPServiceUnavailable(explanation=error.msg)
        retval = self._view_builder.summary(req, dict(new_backup))
        return retval
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.restore)
    def restore(self, req, id, body):
        """Restore an existing backup to a volume.

        Quota-related failures are returned as 413 with a Retry-After
        header; invalid input/volume/backup map to 400.
        """
        LOG.debug('Restoring backup %(backup_id)s (%(body)s)',
                  {'backup_id': id, 'body': body})
        context = req.environ['cinder.context']
        restore = body['restore']
        volume_id = restore.get('volume_id', None)
        name = restore.get('name', None)
        LOG.info("Restoring backup %(backup_id)s to volume %(volume_id)s.",
                 {'backup_id': id, 'volume_id': volume_id},
                 context=context)
        try:
            new_restore = self.backup_api.restore(context,
                                                  backup_id=id,
                                                  volume_id=volume_id,
                                                  name=name)
        # Not found exception will be handled at the wsgi level
        except (exception.InvalidInput,
                exception.InvalidVolume,
                exception.InvalidBackup) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except (exception.VolumeSizeExceedsAvailableQuota,
                exception.VolumeLimitExceeded) as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.msg, headers={'Retry-After': '0'})
        retval = self._view_builder.restore_summary(
            req, dict(new_restore))
        return retval
    def export_record(self, req, id):
        """Export a backup.

        Returns an opaque record (service + url) that can later be fed to
        ``import_record`` to re-register the backup.
        """
        LOG.debug('Export record for backup %s.', id)
        context = req.environ['cinder.context']
        try:
            backup_info = self.backup_api.export_record(context, id)
        # Not found exception will be handled at the wsgi level
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        retval = self._view_builder.export_summary(
            req, dict(backup_info))
        LOG.debug('Exported record output: %s.', retval)
        return retval
    @wsgi.response(HTTPStatus.CREATED)
    @validation.schema(schema.import_record)
    def import_record(self, req, body):
        """Import a backup.

        Consumes a record previously produced by ``export_record``;
        responds 201 on success, 400 on invalid record, 503 when the
        backup service is unavailable.
        """
        LOG.debug('Importing record from %s.', body)
        context = req.environ['cinder.context']
        import_data = body['backup-record']
        backup_service = import_data['backup_service']
        backup_url = import_data['backup_url']
        LOG.debug('Importing backup using %(service)s and url %(url)s.',
                  {'service': backup_service, 'url': backup_url})
        try:
            new_backup = self.backup_api.import_record(context,
                                                       backup_service,
                                                       backup_url)
        except exception.InvalidBackup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        # Other Not found exceptions will be handled at the wsgi level
        except exception.ServiceNotFound as error:
            raise exc.HTTPServiceUnavailable(explanation=error.msg)
        retval = self._view_builder.summary(req, dict(new_backup))
        LOG.debug('Import record output: %s.', retval)
        return retval
class Backups(extensions.ExtensionDescriptor):
    """Backups support."""

    name = 'Backups'
    alias = 'backups'
    updated = '2012-12-12T00:00:00+00:00'

    def get_resources(self):
        """Build the resource extensions exposed by this plugin."""
        backup_resource = extensions.ResourceExtension(
            Backups.alias,
            BackupsController(),
            collection_actions={'detail': 'GET', 'import_record': 'POST'},
            member_actions={'restore': 'POST',
                            'export_record': 'GET',
                            'action': 'POST'})
        return [backup_resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/capabilities.py 0000664 0000000 0000000 00000005140 15131732575 0025050 0 ustar 00root root 0000000 0000000 # Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import capabilities as capabilities_view
from cinder.common import constants
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import capabilities as policy
from cinder.volume import rpcapi
class CapabilitiesController(wsgi.Controller):
    """The Capabilities controller for the OpenStack API."""

    _view_builder_class = capabilities_view.ViewBuilder

    def __init__(self):
        # FIXME(jdg): Is it kosher that this just
        # skips the volume.api and goes straight to RPC
        # from here?
        self.volume_api = rpcapi.VolumeAPI()
        super(CapabilitiesController, self).__init__()

    def show(self, req, id):
        """Return capabilities list of given backend."""
        context = req.environ['cinder.context']
        context.authorize(policy.CAPABILITIES_POLICY)
        service_filters = {'host_or_cluster': id,
                           'binary': constants.VOLUME_BINARY}
        matching = objects.ServiceList.get_all(context, service_filters)
        if not matching:
            raise exception.NotFound(_("Can't find service: %s") % id)
        queue = matching[0].service_topic_queue
        try:
            caps = self.volume_api.get_capabilities(context, queue, False)
        except oslo_messaging.MessagingTimeout:
            raise exception.RPCTimeout(service=queue)
        return self._view_builder.summary(req, caps, queue)
class Capabilities(extensions.ExtensionDescriptor):
    """Capabilities support."""

    name = "Capabilities"
    alias = "capabilities"
    updated = "2015-08-31T00:00:00+00:00"

    def get_resources(self):
        """Return the capabilities resource extension."""
        capabilities_resource = extensions.ResourceExtension(
            Capabilities.alias,
            CapabilitiesController())
        return [capabilities_resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/cgsnapshots.py 0000664 0000000 0000000 00000014154 15131732575 0024760 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cgsnapshots api."""
from http import HTTPStatus
from oslo_log import log as logging
from oslo_log import versionutils
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import cgsnapshots as schema
from cinder.api import validation
from cinder.api.views import cgsnapshots as cgsnapshot_views
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
LOG = logging.getLogger(__name__)
DEPRECATE_CGSNAP_API_MSG = ("Consistency Group Snapshot APIs are deprecated. "
"Use Generic Volume Group Snapshot APIs instead.")
class CgsnapshotsController(wsgi.Controller):
    """The cgsnapshots API controller for the OpenStack API."""
    # View builder used to render cgsnapshot objects into API responses.
    _view_builder_class = cgsnapshot_views.ViewBuilder
    def __init__(self):
        # The whole cgsnapshot API is a deprecated facade over the generic
        # volume-group snapshot API.
        self.group_snapshot_api = group_api.API()
        super(CgsnapshotsController, self).__init__()
    def show(self, req, id):
        """Return data about the given cgsnapshot."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG)
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        cgsnapshot = self._get_cgsnapshot(context, id)
        return self._view_builder.detail(req, cgsnapshot)
    def delete(self, req, id):
        """Delete a cgsnapshot."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG)
        LOG.debug('delete called for member %s', id)
        context = req.environ['cinder.context']
        LOG.info('Delete cgsnapshot with id: %s', id)
        try:
            cgsnapshot = self._get_cgsnapshot(context, id)
            self.group_snapshot_api.delete_group_snapshot(context, cgsnapshot)
        except exception.InvalidGroupSnapshot as e:
            raise exc.HTTPBadRequest(explanation=str(e))
        except (exception.GroupSnapshotNotFound,
                exception.PolicyNotAuthorized):
            # Exceptions will be handled at the wsgi level
            raise
        except Exception:
            # NOTE(review): deliberately broad -- any other failure surfaces
            # as a generic 400 instead of an internal server error.
            msg = _('Failed to delete the cgsnapshot')
            raise exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
    def index(self, req):
        """Returns a summary list of cgsnapshots."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG)
        return self._get_cgsnapshots(req, is_detail=False)
    def detail(self, req):
        """Returns a detailed list of cgsnapshots."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG)
        return self._get_cgsnapshots(req, is_detail=True)
    def _get_cg(self, context, id):
        """Fetch the consistency group (generic group) with the given id."""
        # Not found exception will be handled at the wsgi level
        consistencygroup = self.group_snapshot_api.get(context, group_id=id)
        return consistencygroup
    def _get_cgsnapshot(self, context, id):
        """Fetch the cgsnapshot (generic group snapshot) with the given id."""
        # Not found exception will be handled at the wsgi level
        cgsnapshot = self.group_snapshot_api.get_group_snapshot(
            context, group_snapshot_id=id)
        return cgsnapshot
    def _get_cgsnapshots(self, req, is_detail):
        """Returns a list of cgsnapshots, transformed through view builder."""
        context = req.environ['cinder.context']
        grp_snapshots = self.group_snapshot_api.get_all_group_snapshots(
            context)
        # Classic limit/offset pagination only; no marker-based paging here.
        grpsnap_limited_list = common.limited(grp_snapshots, req)
        if is_detail:
            grp_snapshots = self._view_builder.detail_list(
                req, grpsnap_limited_list)
        else:
            grp_snapshots = self._view_builder.summary_list(
                req, grpsnap_limited_list)
        return grp_snapshots
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Create a new cgsnapshot."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG)
        LOG.debug('Creating new cgsnapshot %s', body)
        context = req.environ['cinder.context']
        cgsnapshot = body['cgsnapshot']
        group_id = cgsnapshot['consistencygroup_id']
        self.clean_name_and_description(cgsnapshot)
        name = cgsnapshot.get('name', None)
        description = cgsnapshot.get('description', None)
        LOG.info("Creating cgsnapshot %(name)s.",
                 {'name': name},
                 context=context)
        # Not found exception will be handled at the wsgi level
        group = self._get_cg(context, group_id)
        try:
            new_cgsnapshot = self.group_snapshot_api.create_group_snapshot(
                context, group, name, description)
        # Not found exception will be handled at the wsgi level
        except (exception.InvalidGroup,
                exception.InvalidGroupSnapshot,
                exception.InvalidVolume) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        retval = self._view_builder.summary(req, new_cgsnapshot)
        return retval
class Cgsnapshots(extensions.ExtensionDescriptor):
    """cgsnapshots support."""

    name = 'Cgsnapshots'
    alias = 'cgsnapshots'
    updated = '2014-08-18T00:00:00+00:00'

    def get_resources(self):
        """Return the cgsnapshots resource with its 'detail' action."""
        cgsnap_resource = extensions.ResourceExtension(
            Cgsnapshots.alias,
            CgsnapshotsController(),
            collection_actions={'detail': 'GET'})
        return [cgsnap_resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/consistencygroups.py 0000664 0000000 0000000 00000032077 15131732575 0026231 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The consistencygroups api."""
from http import HTTPStatus
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import strutils
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import consistencygroups as schema
from cinder.api import validation
from cinder.api.views import consistencygroups as consistencygroup_views
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder.policies import group_actions as gp_action_policy
from cinder.policies import groups as group_policy
from cinder.volume import group_types
LOG = logging.getLogger(__name__)
DEPRECATE_CG_API_MSG = ("Consistency Group APIs are deprecated. "
"Use Generic Volume Group APIs instead.")
class ConsistencyGroupsController(wsgi.Controller):
    """The ConsistencyGroups API controller for the OpenStack API.

    The whole consistency-group API is deprecated in favor of generic
    volume groups, so every public handler emits a deprecation warning.
    """

    _view_builder_class = consistencygroup_views.ViewBuilder

    def __init__(self):
        self.group_api = group_api.API()
        super(ConsistencyGroupsController, self).__init__()

    def show(self, req, id):
        """Return data about the given consistency group."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG)
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        consistencygroup = self._get(context, id)
        return self._view_builder.detail(req, consistencygroup)

    @validation.schema(schema.delete)
    def delete(self, req, id, body):
        """Delete a consistency group.

        The optional request body may carry a boolean-like 'force' flag.
        Responds 202 on success; InvalidConsistencyGroup maps to 400.
        """
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG)
        LOG.debug('delete called for member %s', id)
        context = req.environ['cinder.context']
        force = False
        if body:
            cg_body = body['consistencygroup']
            # We wrap this in a try-except "to be sure to be sure" but the
            # schema will ensure we never get unexpected values here
            try:
                force = strutils.bool_from_string(cg_body.get('force', False),
                                                  strict=True)
            except ValueError:
                # BUGFIX: report the offending value from the request body;
                # previously this formatted the local ``force`` which was
                # still the default False at this point.
                msg = _("Invalid value '%s' for force.") % cg_body.get('force')
                raise exc.HTTPBadRequest(explanation=msg)
        LOG.info('Delete consistency group with id: %s', id)
        try:
            group = self._get(context, id)
            context.authorize(gp_action_policy.DELETE_POLICY, target_obj=group)
            self.group_api.delete(context, group, force)
        # Not found exception will be handled at the wsgi level
        except exception.InvalidConsistencyGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    def index(self, req):
        """Returns a summary list of consistency groups."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG)
        return self._get_consistencygroups(req, is_detail=False)

    def detail(self, req):
        """Returns a detailed list of consistency groups."""
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG)
        return self._get_consistencygroups(req, is_detail=True)

    def _get(self, context, id):
        """Fetch the consistency group (generic group) with the given id."""
        # Not found exception will be handled at the wsgi level
        consistencygroup = self.group_api.get(context, group_id=id)
        return consistencygroup

    def _get_cgsnapshot(self, context, id):
        """Fetch the cgsnapshot (generic group snapshot) with the given id."""
        # Not found exception will be handled at the wsgi level
        cgsnapshot = self.group_api.get_group_snapshot(
            context,
            group_snapshot_id=id)
        return cgsnapshot

    def _get_consistencygroups(self, req, is_detail):
        """Returns a list of consistency groups through view builder."""
        context = req.environ['cinder.context']
        context.authorize(group_policy.GET_ALL_POLICY)
        filters = req.params.copy()
        # NOTE(review): ``filters`` may be modified downstream while the
        # listing is computed, hence the copy of the request params above.
        marker, limit, offset = common.get_pagination_params(filters)
        sort_keys, sort_dirs = common.get_sort_params(filters)
        groups = self.group_api.get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        if is_detail:
            groups = self._view_builder.detail_list(req, groups)
        else:
            groups = self._view_builder.summary_list(req, groups)
        return groups

    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Create a new consistency group.

        Requires a non-empty, comma-separated ``volume_types`` value and
        relies on the default cgsnapshot group type being present.
        """
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG)
        LOG.debug('Creating new consistency group %s', body)
        context = req.environ['cinder.context']
        context.authorize(group_policy.CREATE_POLICY)
        consistencygroup = body['consistencygroup']
        self.clean_name_and_description(consistencygroup)
        name = consistencygroup.get('name', None)
        description = consistencygroup.get('description', None)
        volume_types = consistencygroup.get('volume_types', None)
        if not volume_types:
            msg = _("volume_types must be provided to create "
                    "consistency group %(name)s.") % {'name': name}
            raise exc.HTTPBadRequest(explanation=msg)
        # Tolerate a trailing comma, then split into individual type names.
        volume_types = volume_types.rstrip(',').split(',')
        availability_zone = consistencygroup.get('availability_zone', None)
        group_type = group_types.get_default_cgsnapshot_type()
        if not group_type:
            msg = (_('Group type %s not found. Rerun migration script to '
                     'create the default cgsnapshot type.') %
                   group_types.DEFAULT_CGSNAPSHOT_TYPE)
            raise exc.HTTPBadRequest(explanation=msg)
        LOG.info("Creating consistency group %(name)s.",
                 {'name': name})
        try:
            new_consistencygroup = self.group_api.create(
                context, name, description, group_type['id'], volume_types,
                availability_zone=availability_zone)
        except (exception.InvalidConsistencyGroup,
                exception.InvalidGroup,
                exception.InvalidVolumeType,
                exception.ObjectActionError) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.NotFound:
            # Not found exception will be handled at the wsgi level
            raise
        retval = self._view_builder.summary(req, new_consistencygroup)
        return retval

    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create_from_src)
    def create_from_src(self, req, body):
        """Create a new consistency group from a source.

        The source can be a CG snapshot or a CG. Note that
        this does not require volume_types as the "create"
        API above. Exactly one of 'cgsnapshot_id' or 'source_cgid' must
        be supplied.
        """
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG)
        LOG.debug('Creating new consistency group %s.', body)
        context = req.environ['cinder.context']
        context.authorize(group_policy.CREATE_POLICY)
        consistencygroup = body['consistencygroup-from-src']
        self.clean_name_and_description(consistencygroup)
        name = consistencygroup.get('name', None)
        description = consistencygroup.get('description', None)
        cgsnapshot_id = consistencygroup.get('cgsnapshot_id', None)
        source_cgid = consistencygroup.get('source_cgid', None)
        if not cgsnapshot_id and not source_cgid:
            msg = _("Either 'cgsnapshot_id' or 'source_cgid' must be "
                    "provided to create consistency group %(name)s "
                    "from source.") % {'name': name}
            raise exc.HTTPBadRequest(explanation=msg)
        if cgsnapshot_id and source_cgid:
            msg = _("Cannot provide both 'cgsnapshot_id' and 'source_cgid' "
                    "to create consistency group %(name)s from "
                    "source.") % {'name': name}
            raise exc.HTTPBadRequest(explanation=msg)
        if cgsnapshot_id:
            LOG.info("Creating consistency group %(name)s from "
                     "cgsnapshot %(snap)s.",
                     {'name': name, 'snap': cgsnapshot_id})
        elif source_cgid:
            LOG.info("Creating consistency group %(name)s from "
                     "source consistency group %(source_cgid)s.",
                     {'name': name, 'source_cgid': source_cgid})
        try:
            # Validate the source exists before attempting the create.
            if source_cgid:
                self._get(context, source_cgid)
            if cgsnapshot_id:
                self._get_cgsnapshot(context, cgsnapshot_id)
            new_group = self.group_api.create_from_src(
                context, name, description, cgsnapshot_id, source_cgid)
        except exception.NotFound:
            # Not found exception will be handled at the wsgi level
            raise
        except exception.CinderException as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        retval = self._view_builder.summary(req, new_group)
        return retval

    def _check_update_parameters(self, name, description, add_volumes,
                                 remove_volumes):
        """Reject update requests that would be a complete no-op."""
        if not (name or description or add_volumes or remove_volumes):
            msg = _("Name, description, add_volumes, and remove_volumes "
                    "can not be all empty in the request body.")
            raise exc.HTTPBadRequest(explanation=msg)

    def _update(self, context, group, name, description, add_volumes,
                remove_volumes,
                allow_empty=False):
        """Log the request and delegate the group update to the group API."""
        LOG.info("Updating consistency group %(id)s with name %(name)s "
                 "description: %(description)s add_volumes: "
                 "%(add_volumes)s remove_volumes: %(remove_volumes)s.",
                 {'id': group.id,
                  'name': name,
                  'description': description,
                  'add_volumes': add_volumes,
                  'remove_volumes': remove_volumes})
        self.group_api.update(context, group, name, description,
                              add_volumes, remove_volumes)

    @validation.schema(schema.update)
    def update(self, req, id, body):
        """Update the consistency group.

        Expected format of the input parameter 'body':

        .. code-block:: json

            {
                "consistencygroup":
                {
                    "name": "my_cg",
                    "description": "My consistency group",
                    "add_volumes": "volume-uuid-1,volume-uuid-2,...",
                    "remove_volumes": "volume-uuid-8,volume-uuid-9,..."
                }
            }
        """
        versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG)
        LOG.debug('Update called for consistency group %s.', id)
        if not body:
            msg = _("Missing request body.")
            raise exc.HTTPBadRequest(explanation=msg)
        context = req.environ['cinder.context']
        group = self._get(context, id)
        context.authorize(group_policy.UPDATE_POLICY, target_obj=group)
        consistencygroup = body['consistencygroup']
        self.clean_name_and_description(consistencygroup)
        name = consistencygroup.get('name', None)
        description = consistencygroup.get('description', None)
        add_volumes = consistencygroup.get('add_volumes', None)
        remove_volumes = consistencygroup.get('remove_volumes', None)
        self._check_update_parameters(name, description, add_volumes,
                                      remove_volumes)
        self._update(context, group, name, description, add_volumes,
                     remove_volumes)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Consistencygroups(extensions.ExtensionDescriptor):
    """consistency groups support."""

    name = 'Consistencygroups'
    alias = 'consistencygroups'
    updated = '2014-08-18T00:00:00+00:00'

    def get_resources(self):
        """Return the consistencygroups resource and its extra actions."""
        cg_resource = extensions.ResourceExtension(
            Consistencygroups.alias,
            ConsistencyGroupsController(),
            collection_actions={'detail': 'GET', 'create_from_src': 'POST'},
            member_actions={'delete': 'POST', 'update': 'PUT'})
        return [cg_resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/extended_services.py 0000664 0000000 0000000 00000001510 15131732575 0026117 0 ustar 00root root 0000000 0000000 # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
class Extended_services(extensions.ExtensionDescriptor):
    """Extended services support."""

    # Pure marker extension: advertises the capability, adds no resources.
    name = "ExtendedServices"
    alias = "os-extended-services"
    updated = "2014-01-10T00:00:00-00:00"
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/extended_snapshot_attributes.py 0000664 0000000 0000000 00000004373 15131732575 0030413 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Snapshot Attributes API extension."""
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.policies import snapshots as policy
class ExtendedSnapshotAttributesController(wsgi.Controller):
    """Inject extra snapshot attributes into show/detail responses."""

    def _extend_snapshot(self, req, resp_snap):
        # Pull the cached DB snapshot and copy the selected columns into
        # the response under the extension's alias prefix.
        stored = req.get_db_snapshot(resp_snap['id'])
        for field in ('project_id', 'progress'):
            alias_key = "%s:%s" % (Extended_snapshot_attributes.alias, field)
            resp_snap[alias_key] = stored[field]

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Extend a single-snapshot response when policy allows."""
        context = req.environ['cinder.context']
        if not context.authorize(policy.EXTEND_ATTRIBUTE, fatal=False):
            return
        self._extend_snapshot(req, resp_obj.obj['snapshot'])

    @wsgi.extends
    def detail(self, req, resp_obj):
        """Extend every snapshot in a list response when policy allows."""
        context = req.environ['cinder.context']
        if not context.authorize(policy.EXTEND_ATTRIBUTE, fatal=False):
            return
        for snap in list(resp_obj.obj['snapshots']):
            self._extend_snapshot(req, snap)
class Extended_snapshot_attributes(extensions.ExtensionDescriptor):
    """Extended SnapshotAttributes support."""

    name = "ExtendedSnapshotAttributes"
    alias = "os-extended-snapshot-attributes"
    updated = "2012-06-19T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the attribute-extending controller to 'snapshots'."""
        ctrl = ExtendedSnapshotAttributesController()
        return [extensions.ControllerExtension(self, 'snapshots', ctrl)]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/hosts.py 0000664 0000000 0000000 00000020002 15131732575 0023551 0 ustar 00root root 0000000 0000000 # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import timeutils
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import hosts as schema
from cinder.api import validation
from cinder.common import constants
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import hosts as policy
from cinder.volume import api as volume_api
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _list_hosts(req, service=None):
    """Return summary dicts for every enabled volume service ("host").

    Optionally narrows the result to one availability zone (taken from the
    ``zone`` query parameter) and/or to one service/topic name.
    """
    now = timeutils.utcnow(with_timezone=True)
    context = req.environ['cinder.context']
    svcs = objects.ServiceList.get_all(context, {'disabled': False})
    zone = req.GET.get('zone', '')
    if zone:
        svcs = [s for s in svcs if s['availability_zone'] == zone]
    hosts = []
    for svc in svcs:
        # A service is "available" while its last heartbeat is recent enough.
        delta = now - (svc.updated_at or svc.created_at)
        is_alive = abs(delta.total_seconds()) <= CONF.service_down_time
        status = "available" if is_alive else "unavailable"
        active = 'disabled' if svc.disabled else 'enabled'
        LOG.debug('status, active and update: %s, %s, %s',
                  status, active, svc.updated_at)
        last_seen = svc.updated_at
        if last_seen:
            last_seen = timeutils.normalize_time(last_seen)
        hosts.append({'host_name': svc.host,
                      'service': svc.topic,
                      'zone': svc.availability_zone,
                      'service-status': status,
                      'service-state': active,
                      'last-update': last_seen,
                      })
    if service:
        hosts = [entry for entry in hosts if entry['service'] == service]
    return hosts
def check_host(f):
    """Decorator ensuring the target host exists before calling *f*."""
    @functools.wraps(f)
    def wrapped(self, req, id, service=None, *args, **kwargs):
        known = {entry["host_name"] for entry in _list_hosts(req, service)}
        if id not in known:
            raise exception.HostNotFound(host=id)
        return f(self, req, id, *args, **kwargs)
    return wrapped
class HostController(wsgi.Controller):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = volume_api.HostAPI()
super(HostController, self).__init__()
versionutils.report_deprecated_feature(
LOG,
"The Host API is deprecated and will be "
"be removed in a future version.")
    def index(self, req):
        """Return a summary list of all volume services ("hosts").

        Requires the hosts manage policy; zone filtering is delegated to
        the module-level ``_list_hosts`` helper.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.MANAGE_POLICY)
        return {'hosts': _list_hosts(req)}
    @check_host
    @validation.schema(schema.update)
    def update(self, req, id, body):
        """Enable or disable a host.

        ``body`` is a mapping of update settings; the only supported key is
        ``status`` with value ``enable`` or ``disable`` (case-insensitive,
        whitespace-tolerant).  Any other key, or any other value for
        ``status``, is rejected with HTTP 400.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.MANAGE_POLICY)
        update_values = {}
        for raw_key, raw_val in body.items():
            key = raw_key.lower().strip()
            val = raw_val.lower().strip()
            if key == "status":
                if val in ("enable", "disable"):
                    # Store a boolean: True means "enable".
                    update_values['status'] = val.startswith("enable")
                else:
                    explanation = _("Invalid status: '%s'") % raw_val
                    raise webob.exc.HTTPBadRequest(explanation=explanation)
            else:
                explanation = _("Invalid update setting: '%s'") % raw_key
                raise webob.exc.HTTPBadRequest(explanation=explanation)
        # Dispatch each collected update through its setter and merge the
        # partial results into one response dict.
        update_setters = {'status': self._set_enabled_status}
        result = {}
        for key, value in update_values.items():
            result.update(update_setters[key](req, id, value))
        return result
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.info("Setting host %(host)s to %(state)s.",
{'host': host, 'state': state})
result = self.api.set_host_enabled(context,
host=host,
enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
return {"host": host, "status": result}
def show(self, req, id):
"""Shows the volume usage info given by hosts.
:param req: security context
:param id: hostname
:returns: dict -- the host resources dictionary.
ex.::
{'host': [{'resource': D},..]}
D: {'host': 'hostname','project': 'admin',
'volume_count': 1, 'total_volume_gb': 2048}
"""
host = id
context = req.environ['cinder.context']
context.authorize(policy.MANAGE_POLICY)
# Not found exception will be handled at the wsgi level
host_ref = objects.Service.get_by_host_and_topic(
context, host, constants.VOLUME_TOPIC)
# Getting total available/used resource on a host.
volume_refs = db.volume_get_all_by_host(context, host_ref.host)
(count, vol_sum) = db.volume_data_get_for_host(context,
host_ref.host)
snap_count_total = 0
snap_sum_total = 0
resources = [{'resource': {'host': host, 'project': '(total)',
'volume_count': str(count),
'total_volume_gb': str(vol_sum),
'snapshot_count': str(snap_count_total),
'total_snapshot_gb': str(snap_sum_total)}}]
project_ids = [v['project_id'] for v in volume_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
(count, vol_sum) = db.volume_data_get_for_project(
context, project_id, host=host_ref.host)
(snap_count, snap_sum) = (
objects.Snapshot.snapshot_data_get_for_project(
context, project_id, host=host_ref.host))
resources.append(
{'resource':
{'host': host,
'project': project_id,
'volume_count': str(count),
'total_volume_gb': str(vol_sum),
'snapshot_count': str(snap_count),
'total_snapshot_gb': str(snap_sum)}})
snap_count_total += int(snap_count)
snap_sum_total += int(snap_sum)
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total)
return {"host": resources}
class Hosts(extensions.ExtensionDescriptor):
    """Admin-only host administration."""

    # Extension metadata consumed by the extension loader.
    name = "Hosts"
    alias = "os-hosts"
    updated = "2011-06-29T00:00:00+00:00"

    def get_resources(self):
        """Build the resource mapping for the os-hosts extension."""
        controller = HostController()
        resource = extensions.ResourceExtension(
            'os-hosts', controller,
            collection_actions={'update': 'PUT'})
        return [resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/qos_specs_manage.py 0000664 0000000 0000000 00000042161 15131732575 0025732 0 ustar 00root root 0000000 0000000 # Copyright (c) 2013 eBay Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The QoS specs extension"""
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import timeutils
import webob
from cinder.api import api_utils
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import qos_specs as schema
from cinder.api import validation
from cinder.api.views import qos_specs as view_qos_specs
from cinder import exception
from cinder.i18n import _
from cinder.policies import qos_specs as policy
from cinder import rpc
from cinder import utils
from cinder.volume import qos_specs
LOG = logging.getLogger(__name__)
def _check_specs(context, specs_id):
    """Ensure the QoS specs entry exists by looking it up.

    :param context: cinder request context
    :param specs_id: ID of the QoS specs to verify
    # NOTE(review): the lookup presumably raises QoSSpecsNotFound when the
    # id is unknown; that exception is translated to a 404 by the wsgi
    # layer, per the comment below.
    """
    # Not found exception will be handled at the wsgi level
    qos_specs.get_qos_specs(context, specs_id)
class QoSSpecsController(wsgi.Controller):
    """The volume type extra specs API controller for the OpenStack API."""

    _view_builder_class = view_qos_specs.ViewBuilder

    @staticmethod
    @utils.if_notifications_enabled
    def _notify_qos_specs_error(context, method, payload):
        """Emit an error notification for a failed QoS specs operation."""
        rpc.get_notifier('QoSSpecs').error(context,
                                           method,
                                           payload)

    def index(self, req):
        """Returns the list of qos_specs."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_ALL_POLICY)
        params = req.params.copy()
        marker, limit, offset = common.get_pagination_params(params)
        sort_keys, sort_dirs = common.get_sort_params(params)
        # Whatever remains after pagination/sort extraction is treated as
        # a filter; unsupported options are stripped below.
        filters = params
        allowed_search_options = ('id', 'name', 'consumer')
        api_utils.remove_invalid_filter_options(context, filters,
                                                allowed_search_options)
        specs = qos_specs.get_all_specs(context, filters=filters,
                                        marker=marker, limit=limit,
                                        offset=offset, sort_keys=sort_keys,
                                        sort_dirs=sort_dirs)
        return self._view_builder.summary_list(req, specs)

    @validation.schema(schema.create)
    def create(self, req, body=None):
        """Create a new QoS specs entity named body['qos_specs']['name'].

        All remaining key/value pairs in body['qos_specs'] become the
        specs themselves.  Emits a 'qos_specs.create' notification on
        success and an error notification on failure.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.CREATE_POLICY)
        specs = body['qos_specs']
        # 'name' is guaranteed present by the schema; the rest of the dict
        # is the actual spec payload.
        name = specs.pop('name', None)
        name = name.strip()
        try:
            spec = qos_specs.create(context, name, specs)
            notifier_info = dict(name=name,
                                 created_at=spec.created_at,
                                 specs=specs)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.create',
                                              notifier_info)
        except exception.InvalidQoSSpecs as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=str(err))
        except exception.QoSSpecsExists as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPConflict(explanation=str(err))
        except exception.QoSSpecsCreateFailed as err:
            notifier_err = dict(name=name, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.create',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))
        return self._view_builder.detail(req, spec)

    @validation.schema(schema.set)
    def update(self, req, id, body=None):
        """Update (set) key/value pairs on an existing QoS specs entity."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        specs = body['qos_specs']
        try:
            # Fetch first so the notification can carry created_at.
            spec = qos_specs.get_qos_specs(context, id)
            qos_specs.update(context, id, specs)
            notifier_info = dict(id=id,
                                 created_at=spec.created_at,
                                 updated_at=timeutils.utcnow(),
                                 specs=specs)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.update',
                                              notifier_info)
        except (exception.QoSSpecsNotFound, exception.InvalidQoSSpecs) as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            # Not found exception will be handled at the wsgi level
            raise
        except exception.QoSSpecsUpdateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.update',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))
        return body

    def show(self, req, id):
        """Return a single qos spec item."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_POLICY)
        # Not found exception will be handled at the wsgi level
        spec = qos_specs.get_qos_specs(context, id)
        return self._view_builder.detail(req, spec)

    def delete(self, req, id):
        """Deletes an existing qos specs.

        With ?force=true the specs are deleted even while still
        associated with volume types.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.DELETE_POLICY)
        # Convert string to bool type in strict manner
        force = utils.get_bool_param('force', req.params)
        LOG.debug("Delete qos_spec: %(id)s, force: %(force)s",
                  {'id': id, 'force': force})
        try:
            spec = qos_specs.get_qos_specs(context, id)
            qos_specs.delete(context, id, force)
            notifier_info = dict(id=id,
                                 created_at=spec.created_at,
                                 deleted_at=timeutils.utcnow())
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.delete',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete',
                                         notifier_err)
            # Not found exception will be handled at the wsgi level
            raise
        except exception.QoSSpecsInUse as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete',
                                         notifier_err)
            # QoSSpecsInUse with force=True means the forced disassociation
            # itself failed; without force it is a plain client error.
            if force:
                msg = _('Failed to disassociate qos specs.')
                raise webob.exc.HTTPInternalServerError(explanation=msg)
            msg = _('Qos specs still in use.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @validation.schema(schema.unset)
    def delete_keys(self, req, id, body):
        """Deletes specified keys in qos specs."""
        context = req.environ['cinder.context']
        context.authorize(policy.DELETE_POLICY)
        keys = body['keys']
        LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s",
                  {'id': id, 'keys': keys})
        try:
            qos_specs.delete_keys(context, id, keys)
            # Re-fetch after the delete so the notification reflects the
            # post-update timestamps.
            spec = qos_specs.get_qos_specs(context, id)
            notifier_info = dict(id=id,
                                 created_at=spec.created_at,
                                 updated_at=spec.updated_at)
            rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete_keys',
                                              notifier_info)
        except exception.NotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.delete_keys',
                                         notifier_err)
            # Not found exception will be handled at the wsgi level
            raise
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    def associations(self, req, id):
        """List all associations of given qos specs."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_ALL_POLICY)
        LOG.debug("Get associations for qos_spec id: %s", id)
        try:
            spec = qos_specs.get_qos_specs(context, id)
            associates = qos_specs.get_associations(context, id)
            notifier_info = dict(id=id,
                                 created_at=spec.created_at)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.associations',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associations',
                                         notifier_err)
            # Not found exception will be handled at the wsgi level
            raise
        except exception.CinderException as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associations',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))
        return self._view_builder.associations(req, associates)

    def associate(self, req, id):
        """Associate a qos specs with a volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        type_id = req.params.get('vol_type_id', None)
        if not type_id:
            msg = _('Volume Type id must not be None.')
            notifier_err = dict(id=id, error_message=msg)
            # BUGFIX: this error was previously notified under the
            # unrelated 'qos_specs.delete' event name (copy-paste error).
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s",
                  {'id': id, 'type_id': type_id})
        try:
            spec = qos_specs.get_qos_specs(context, id)
            qos_specs.associate_qos_with_type(context, id, type_id)
            notifier_info = dict(id=id, type_id=type_id,
                                 created_at=spec.created_at)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.associate',
                                              notifier_info)
        except exception.NotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            # Not found exception will be handled at the wsgi level
            raise
        except exception.InvalidVolumeType as err:
            notifier_err = dict(id=id, error_message=err)
            # BUGFIX: the error notification used to be emitted twice for
            # this exception; a single notification is sufficient.
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=str(err))
        except exception.QoSSpecsAssociateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.associate',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    def disassociate(self, req, id):
        """Disassociate a qos specs from a volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        type_id = req.params.get('vol_type_id', None)
        if not type_id:
            msg = _('Volume Type id must not be None.')
            notifier_err = dict(id=id, error_message=msg)
            # BUGFIX: this error was previously notified under the
            # unrelated 'qos_specs.delete' event name (copy-paste error).
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate',
                                         notifier_err)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s",
                  {'id': id, 'type_id': type_id})
        try:
            spec = qos_specs.get_qos_specs(context, id)
            qos_specs.disassociate_qos_specs(context, id, type_id)
            notifier_info = dict(id=id, type_id=type_id,
                                 created_at=spec.created_at)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.disassociate',
                                              notifier_info)
        except exception.NotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate',
                                         notifier_err)
            # Not found exception will be handled at the wsgi level
            raise
        except exception.QoSSpecsDisassociateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    def disassociate_all(self, req, id):
        """Disassociate a qos specs from all volume types."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        LOG.debug("Disassociate qos_spec: %s from all.", id)
        try:
            spec = qos_specs.get_qos_specs(context, id)
            qos_specs.disassociate_all(context, id)
            notifier_info = dict(id=id,
                                 created_at=spec.created_at)
            rpc.get_notifier('QoSSpecs').info(context,
                                              'qos_specs.disassociate_all',
                                              notifier_info)
        except exception.QoSSpecsNotFound as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate_all',
                                         notifier_err)
            # Not found exception will be handled at the wsgi level
            raise
        except exception.QoSSpecsDisassociateFailed as err:
            notifier_err = dict(id=id, error_message=err)
            self._notify_qos_specs_error(context,
                                         'qos_specs.disassociate_all',
                                         notifier_err)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Qos_specs_manage(extensions.ExtensionDescriptor):
    """QoS specs support."""

    # Extension metadata consumed by the extension loader.
    name = "Qos_specs_manage"
    alias = "qos-specs"
    updated = "2013-08-02T00:00:00+00:00"

    def get_resources(self):
        """Register the qos-specs resource and its member actions."""
        member_actions = {"associations": "GET",
                          "associate": "GET",
                          "disassociate": "GET",
                          "disassociate_all": "GET",
                          "delete_keys": "PUT"}
        resource = extensions.ResourceExtension(
            Qos_specs_manage.alias,
            QoSSpecsController(),
            member_actions=member_actions)
        return [resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/quota_classes.py 0000664 0000000 0000000 00000006115 15131732575 0025270 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import quota_classes as schema
from cinder.api import validation
from cinder import db
from cinder import exception
from cinder.policies import quota_class as policy
from cinder import quota
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
class QuotaClassSetsController(wsgi.Controller):
    """API controller for showing and updating quota class limits."""

    def _format_quota_set(self, quota_class, quota_set):
        """Convert the quota object to a result dict."""
        quota_set['id'] = str(quota_class)
        return {'quota_class_set': quota_set}

    def show(self, req, id):
        """Return the merged volume and group quotas for quota class *id*."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_POLICY)
        try:
            db.sqlalchemy.api.authorize_quota_class_context(context, id)
        except exception.NotAuthorized:
            raise webob.exc.HTTPForbidden()
        # Merge per-resource and per-group quota limits into one set.
        quota_set = QUOTAS.get_class_quotas(context, id)
        quota_set.update(GROUP_QUOTAS.get_class_quotas(context, id))
        return self._format_quota_set(id, quota_set)

    @validation.schema(schema.update_quota_class)
    def update(self, req, id, body):
        """Create or update individual quota limits for a quota class."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        self.validate_string_length(id, 'quota_class_name',
                                    min_length=1, max_length=255)
        quota_class = id
        for resource, limit in body['quota_class_set'].items():
            try:
                db.quota_class_update(context, quota_class, resource, limit)
            except exception.QuotaClassNotFound:
                # First time this resource is set for the class.
                db.quota_class_create(context, quota_class, resource, limit)
            except exception.AdminRequired:
                raise webob.exc.HTTPForbidden()
        merged = QUOTAS.get_class_quotas(context, quota_class)
        merged.update(GROUP_QUOTAS.get_class_quotas(context, quota_class))
        return {'quota_class_set': merged}
class Quota_classes(extensions.ExtensionDescriptor):
    """Quota classes management support."""

    # Extension metadata consumed by the extension loader.
    name = "QuotaClasses"
    alias = "os-quota-class-sets"
    updated = "2012-03-12T00:00:00+00:00"

    def get_resources(self):
        """Register the os-quota-class-sets resource."""
        return [extensions.ResourceExtension('os-quota-class-sets',
                                             QuotaClassSetsController())]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/quotas.py 0000664 0000000 0000000 00000015506 15131732575 0023742 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import quotas as schema
from cinder.api import validation
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.policies import quotas as policy
from cinder import quota
from cinder import utils
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
NON_QUOTA_KEYS = quota.NON_QUOTA_KEYS
class QuotaSetsController(wsgi.Controller):
    """API controller for showing, updating and deleting project quotas."""

    def _format_quota_set(self, project_id, quota_set):
        """Convert the quota object to a result dict."""
        quota_set['id'] = str(project_id)
        return dict(quota_set=quota_set)

    def _validate_existing_resource(self, key, value, quota_values):
        """Reject a new limit that is below the resource's current usage.

        A limit of -1 (unlimited) and the 'per_volume_gigabytes' key are
        always accepted.

        :raises webob.exc.HTTPBadRequest: if *value* is lower than the
            resource's current in_use + reserved count.
        """
        # -1 limit will always be greater than the existing value
        if key == 'per_volume_gigabytes' or value == -1:
            return
        v = quota_values.get(key, {})
        used = (v.get('in_use', 0) + v.get('reserved', 0))
        if value < used:
            msg = (_("Quota %(key)s limit must be equal or greater than "
                     "existing resources. Current usage is %(usage)s "
                     "and the requested limit is %(limit)s.")
                   % {'key': key,
                      'usage': used,
                      'limit': value})
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def _get_quotas(self, context, id, usages=False):
        """Return merged volume and group quotas for project *id*.

        With usages=True the full per-resource usage dicts are returned;
        otherwise only the bare limit values.
        """
        values = QUOTAS.get_project_quotas(context, id, usages=usages)
        group_values = GROUP_QUOTAS.get_project_quotas(context, id,
                                                       usages=usages)
        values.update(group_values)
        if usages:
            return values
        return {k: v['limit'] for k, v in values.items()}

    def show(self, req, id):
        """Show quota for a particular tenant

        :param req: request
        :param id: target project id that needs to be shown
        """
        context = req.environ['cinder.context']
        params = req.params
        target_project_id = id
        context.authorize(policy.SHOW_POLICY,
                          target={'project_id': target_project_id})
        # '?usage=true' switches the response from bare limits to full
        # usage info.  (The old "not hasattr(params, '__call__')" guard was
        # vestigial: req.params is a mapping and is never callable.)
        if 'usage' in params:
            usage = utils.get_bool_param('usage', params)
        else:
            usage = False
        quotas = self._get_quotas(context, target_project_id, usage)
        return self._format_quota_set(target_project_id, quotas)

    @validation.schema(schema.update)
    def update(self, req, id, body):
        """Update Quota for a particular tenant

        :param req: request
        :param id: target project id that needs to be updated
        :param body: key, value pair that will be applied to
                     the resources if the update succeeds
        """
        context = req.environ['cinder.context']
        target_project_id = id
        context.authorize(policy.UPDATE_POLICY,
                          target={'project_id': target_project_id})
        self.validate_string_length(id, 'quota_set_name',
                                    min_length=1, max_length=255)
        # Pass #1: validate every requested limit against current resource
        # usage so we can bail out before applying anything.
        quota_values = QUOTAS.get_project_quotas(context, target_project_id,
                                                 defaults=False)
        group_quota_values = GROUP_QUOTAS.get_project_quotas(context,
                                                             target_project_id,
                                                             defaults=False)
        quota_values.update(group_quota_values)
        valid_quotas = {}
        for key in body['quota_set'].keys():
            if key in NON_QUOTA_KEYS:
                continue
            self._validate_existing_resource(key, body['quota_set'][key],
                                             quota_values)
            valid_quotas[key] = body['quota_set'][key]
        # Pass #2: all keys and values are known valid, so apply them in one
        # shot without having to worry about rolling back.
        # NOTE: the old 'reservations' list was never populated, which made
        # the db.reservation_commit() call unreachable dead code; it has
        # been removed.
        for key, value in valid_quotas.items():
            try:
                db.quota_update(context, target_project_id, key, value)
            except exception.ProjectQuotaNotFound:
                db.quota_create(context, target_project_id, key, value)
            except exception.AdminRequired:
                raise webob.exc.HTTPForbidden()
        return {'quota_set': self._get_quotas(context, target_project_id)}

    def _get_quota_usage(self, quota_obj):
        """Total consumption (in use + reserved) from a usage dict."""
        return (quota_obj.get('in_use', 0) + quota_obj.get('reserved', 0))

    def defaults(self, req, id):
        """Return the merged default volume and group quotas for *id*."""
        context = req.environ['cinder.context']
        context.authorize(policy.SHOW_POLICY, target={'project_id': id})
        defaults = QUOTAS.get_defaults(context, project_id=id)
        group_defaults = GROUP_QUOTAS.get_defaults(context, project_id=id)
        defaults.update(group_defaults)
        return self._format_quota_set(id, defaults)

    def delete(self, req, id):
        """Delete Quota for a particular tenant.

        :param req: request
        :param id: target project id that needs to be deleted
        """
        context = req.environ['cinder.context']
        context.authorize(policy.DELETE_POLICY, target={'project_id': id})
        db.quota_destroy_by_project(context, id)
class Quotas(extensions.ExtensionDescriptor):
    """Quota management support."""

    # Extension metadata consumed by the extension loader.
    name = "Quotas"
    alias = "os-quota-sets"
    updated = "2011-08-08T00:00:00+00:00"

    def get_resources(self):
        """Register the os-quota-sets resource with a 'defaults' action."""
        resource = extensions.ResourceExtension(
            'os-quota-sets', QuotaSetsController(),
            member_actions={'defaults': 'GET'})
        return [resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/resource_common_manage.py 0000664 0000000 0000000 00000005032 15131732575 0027126 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging as messaging
from cinder.api import common
from cinder.api import microversions as mv
from cinder import exception
from cinder.i18n import _
def get_manageable_resources(req, is_detail, function_get_manageable,
                             view_builder):
    """List manageable backend resources via *function_get_manageable*.

    Validates pagination/sort parameters from the request, invokes the
    supplied getter, and renders the result with *view_builder* (detailed
    when *is_detail* is true, summary otherwise).
    """
    context = req.environ['cinder.context']
    params = req.params.copy()
    cluster_name, host = common.get_cluster_host(
        req, params, mv.MANAGE_EXISTING_CLUSTER)
    marker, limit, offset = common.get_pagination_params(params)
    sort_keys, sort_dirs = common.get_sort_params(params,
                                                  default_key='reference')
    # These parameters are generally validated at the DB layer, but in this
    # case sorting is not done by the DB
    bad_keys = [k for k in sort_keys if k not in ('reference', 'size')]
    if bad_keys:
        msg = _("Invalid sort keys passed: %s") % ', '.join(bad_keys)
        raise exception.InvalidParameterValue(err=msg)
    bad_dirs = [d for d in sort_dirs if d not in ('asc', 'desc')]
    if bad_dirs:
        msg = _("Invalid sort dirs passed: %s") % ', '.join(bad_dirs)
        raise exception.InvalidParameterValue(err=msg)
    try:
        resources = function_get_manageable(context, host, cluster_name,
                                            marker=marker, limit=limit,
                                            offset=offset,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)
    except messaging.RemoteError as err:
        # Translate a remote InvalidInput back into the local exception.
        if err.exc_type == "InvalidInput":
            raise exception.InvalidInput(err.value)
        raise
    render = view_builder.detail_list if is_detail else view_builder.summary_list
    return render(req, resources, len(resources))
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/scheduler_hints.py 0000664 0000000 0000000 00000003355 15131732575 0025610 0 ustar 00root root 0000000 0000000 # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.schemas import scheduler_hints as schema
from cinder.api import validation
def create(req, body):
    """Validate and inline scheduler hints from the OS-SCH-HNT attribute.

    When the request body carries 'OS-SCH-HNT:scheduler_hints', the hints
    are schema-validated and copied into body['volume']['scheduler_hints'].
    The (possibly updated) body is returned either way.
    """
    attr = 'OS-SCH-HNT:scheduler_hints'
    hints = body.get(attr)
    if hints is None:
        return body
    scheduler_hints_body = {attr: hints}

    @validation.schema(schema.create)
    def _validate_scheduler_hints(req=None, body=None):
        # TODO(pooja_jadhav): The scheduler hints schema validation
        # should be moved to v3 volume schema directly and this module
        # should be deleted at the time of deletion of v2 version code.
        pass

    _validate_scheduler_hints(req=req, body=scheduler_hints_body)
    body['volume']['scheduler_hints'] = scheduler_hints_body.get(attr)
    return body
# NOTE: This class is added to include "OS-SCH-HNT" in the list extensions
# response and "OS-SCH-HNT" is still not loaded as a standard extension.
class Scheduler_hints(extensions.ExtensionDescriptor):
    """Pass arbitrary key/value pairs to the scheduler."""
    # Metadata-only descriptor: it registers no resources of its own; the
    # actual hint processing lives in the module-level create() helper.
    name = "SchedulerHints"
    alias = "OS-SCH-HNT"
    updated = "2013-04-18T00:00:00+00:00"
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/scheduler_stats.py 0000664 0000000 0000000 00000005316 15131732575 0025620 0 ustar 00root root 0000000 0000000 # Copyright (c) 2014 eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Scheduler Stats extension"""
from cinder.api import common
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.views import scheduler_stats as scheduler_stats_view
from cinder.policies import scheduler_stats as policy
from cinder.scheduler import rpcapi
from cinder import utils
class SchedulerStatsController(wsgi.Controller):
    """The Scheduler Stats controller for the OpenStack API."""

    _view_builder_class = scheduler_stats_view.ViewBuilder

    def __init__(self):
        self.scheduler_api = rpcapi.SchedulerAPI()
        super(SchedulerStatsController, self).__init__()

    @common.process_general_filtering('pool')
    def _process_pool_filtering(self, context=None, filters=None,
                                req_version=None):
        """Drop all pool filters when the request predates pool filtering."""
        if not req_version.matches(mv.POOL_FILTER):
            filters.clear()

    def get_pools(self, req):
        """List all active pools in scheduler."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_POOL_POLICY)
        detail = utils.get_bool_param('detail', req.params)
        api_version = req.api_version_request
        pool_filters = req.params.copy()
        pool_filters.pop('detail', None)
        self._process_pool_filtering(context=context,
                                     filters=pool_filters,
                                     req_version=api_version)
        # 'volume_type' only became a valid filter with POOL_TYPE_FILTER.
        if not api_version.matches(mv.POOL_TYPE_FILTER):
            pool_filters.pop('volume_type', None)
        pools = self.scheduler_api.get_pools(context, filters=pool_filters)
        return self._view_builder.pools(req, pools, detail)
class Scheduler_stats(extensions.ExtensionDescriptor):
    """Scheduler stats support."""

    # Extension metadata consumed by the extension loader.
    name = "Scheduler_stats"
    alias = "scheduler-stats"
    updated = "2014-09-07T00:00:00+00:00"

    def get_resources(self):
        """Register the scheduler-stats resource with a get_pools action."""
        resource = extensions.ResourceExtension(
            Scheduler_stats.alias,
            SchedulerStatsController(),
            collection_actions={"get_pools": "GET"})
        return [resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/services.py 0000664 0000000 0000000 00000001474 15131732575 0024250 0 ustar 00root root 0000000 0000000 # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
class Services(extensions.ExtensionDescriptor):
    """Services support."""
    # Metadata-only descriptor with no get_resources(); the os-services
    # endpoints are presumably wired up elsewhere in the API — this class
    # only advertises the extension in the extension list.
    name = "Services"
    alias = "os-services"
    updated = "2012-10-28T00:00:00-00:00"
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/snapshot_actions.py 0000664 0000000 0000000 00000007665 15131732575 0026014 0 ustar 00root root 0000000 0000000 # Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_actions as schema
from cinder.api import validation
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.policies import snapshot_actions as policy
LOG = logging.getLogger(__name__)
class SnapshotActionsController(wsgi.Controller):
    """Adds the os-update_snapshot_status action to the snapshots resource."""

    def __init__(self, *args, **kwargs):
        super(SnapshotActionsController, self).__init__(*args, **kwargs)
        LOG.debug("SnapshotActionsController initialized")

    @wsgi.action('os-update_snapshot_status')
    @validation.schema(schema.update_snapshot_status)
    def _update_snapshot_status(self, req, id, body):
        """Update database fields related to status of a snapshot.

        Intended for creation of snapshots, so snapshot state
        must start as 'creating' and be changed to 'available',
        'creating', or 'error'.
        """
        context = req.environ['cinder.context']
        LOG.debug("body: %s", body)
        requested = body['os-update_snapshot_status']['status']

        # Legal target statuses, keyed by the snapshot's current status.
        allowed = {
            fields.SnapshotStatus.CREATING: (
                fields.SnapshotStatus.CREATING,
                fields.SnapshotStatus.AVAILABLE,
                fields.SnapshotStatus.ERROR,
            ),
            fields.SnapshotStatus.DELETING: (
                fields.SnapshotStatus.DELETING,
                fields.SnapshotStatus.ERROR_DELETING,
            ),
        }

        snapshot = objects.Snapshot.get_by_id(context, id)
        context.authorize(policy.UPDATE_STATUS_POLICY, target_obj=snapshot)

        if snapshot.status not in allowed:
            msg = _("Snapshot status %(cur)s not allowed for "
                    "update_snapshot_status") % {'cur': snapshot.status}
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if requested not in allowed[snapshot.status]:
            msg = _("Provided snapshot status %(provided)s not allowed for "
                    "snapshot with status %(current)s.") % {
                'provided': requested,
                'current': snapshot.status}
            raise webob.exc.HTTPBadRequest(explanation=msg)

        changes = {'id': id, 'status': requested}
        progress = body['os-update_snapshot_status'].get('progress', None)
        if progress:
            changes['progress'] = progress

        LOG.info("Updating snapshot %(id)s with info %(dict)s",
                 {'id': id, 'dict': changes})
        snapshot.update(changes)
        snapshot.save()
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Snapshot_actions(extensions.ExtensionDescriptor):
    """Enable snapshot manager actions."""

    name = "SnapshotActions"
    alias = "os-snapshot-actions"
    updated = "2013-07-16T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach the action controller to the core 'snapshots' resource.
        ctrl = SnapshotActionsController()
        return [extensions.ControllerExtension(self, 'snapshots', ctrl)]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/snapshot_manage.py 0000664 0000000 0000000 00000012645 15131732575 0025576 0 ustar 00root root 0000000 0000000 # Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
from cinder.api.contrib import resource_common_manage
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_manage as schema
from cinder.api import validation
from cinder.api.views import manageable_snapshots as list_manageable_view
from cinder.api.views import snapshots as snapshot_views
from cinder.policies import manageable_snapshots as policy
from cinder import volume as cinder_volume
LOG = logging.getLogger(__name__)
class SnapshotManageController(wsgi.Controller):
    """The /os-snapshot-manage controller for the OpenStack API."""

    _view_builder_class = snapshot_views.ViewBuilder

    def __init__(self, *args, **kwargs):
        super(SnapshotManageController, self).__init__(*args, **kwargs)
        self.volume_api = cinder_volume.API()
        self._list_manageable_view = list_manageable_view.ViewBuilder()

    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Instruct Cinder to manage a storage snapshot object.

        Manages an existing backend storage snapshot object (e.g. a Linux
        logical volume or a SAN disk) by creating the Cinder objects required
        to manage it, and possibly renaming the backend storage snapshot
        object (driver dependent).

        From an API perspective, this operation behaves very much like a
        snapshot creation operation.

        Required HTTP Body:

        .. code-block:: json

            {
                "snapshot":
                {
                    "volume_id": "<Cinder volume id>",
                    "ref":
                        "<driver-specific reference to the backend object>"
                }
            }

        See the appropriate Cinder drivers' implementations of the
        manage_snapshot method to find out the accepted format of 'ref'.
        For example, in the LVM driver it is the logical volume name of the
        snapshot which you want to manage.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'volume_id' element refers to
        a cinder volume that could not be found.

        The snapshot will later enter the error state if it is discovered
        that 'ref' is bad.

        Optional elements to 'snapshot' are::

            name          A name for the new snapshot.
            description   A description for the new snapshot.
            metadata      Key/value pairs to be associated with the new
                          snapshot.
        """
        context = req.environ['cinder.context']
        snapshot = body['snapshot']

        # Check whether volume exists.
        volume_id = snapshot['volume_id']
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, volume_id)
        context.authorize(policy.MANAGE_POLICY, target_obj=volume)

        LOG.debug('Manage snapshot request body: %s', body)

        snapshot_parameters = {
            'metadata': snapshot.get('metadata', None),
            'description': snapshot.get('description', None),
            'name': snapshot.get('name'),
        }

        # Not found exception will be handled at the wsgi level
        new_snapshot = self.volume_api.manage_existing_snapshot(
            context,
            snapshot['ref'],
            volume,
            **snapshot_parameters)

        return self._view_builder.detail(req, new_snapshot)

    def _list_manageable(self, req, is_detail):
        """Authorize and fetch the manageable snapshot list.

        Shared implementation for ``index`` (summary) and ``detail``
        (full records), which previously duplicated this code.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.LIST_MANAGEABLE_POLICY)
        return resource_common_manage.get_manageable_resources(
            req, is_detail, self.volume_api.get_manageable_snapshots,
            self._list_manageable_view)

    @wsgi.extends
    def index(self, req):
        """Returns a summary list of snapshots available to manage."""
        return self._list_manageable(req, False)

    @wsgi.extends
    def detail(self, req):
        """Returns a detailed list of snapshots available to manage."""
        return self._list_manageable(req, True)
class Snapshot_manage(extensions.ExtensionDescriptor):
    """Allows existing backend storage to be 'managed' by Cinder."""

    name = 'SnapshotManage'
    alias = 'os-snapshot-manage'
    updated = '2014-12-31T00:00:00+00:00'

    def get_resources(self):
        # Register /os-snapshot-manage, with an extra 'GET .../detail'
        # collection action alongside the default index.
        res = extensions.ResourceExtension(
            Snapshot_manage.alias,
            SnapshotManageController(),
            collection_actions={'detail': 'GET'})
        return [res]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/snapshot_unmanage.py 0000664 0000000 0000000 00000005557 15131732575 0026145 0 ustar 00root root 0000000 0000000 # Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_unmanage as schema
from cinder.api import validation
from cinder import exception
from cinder.policies import manageable_snapshots as policy
from cinder import volume
LOG = logging.getLogger(__name__)
class SnapshotUnmanageController(wsgi.Controller):
    """Implements the os-unmanage action on the snapshots resource."""

    def __init__(self, *args, **kwargs):
        super(SnapshotUnmanageController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-unmanage')
    @validation.schema(schema.unmanage)
    def unmanage(self, req, id, body):
        """Stop managing a snapshot.

        This action is very much like a delete, except that a different
        method (unmanage) is called on the Cinder driver. This has the
        effect of removing the snapshot from Cinder management without
        actually removing the backend storage object associated with it.

        There are no required parameters.

        A Not Found error is returned if the specified snapshot does not
        exist.
        """
        context = req.environ['cinder.context']
        LOG.info("Unmanage snapshot with id: %s", id)

        try:
            snapshot = self.volume_api.get_snapshot(context, id)
            context.authorize(policy.UNMANAGE_POLICY, target_obj=snapshot)
            # unmanage_only makes this a delete that leaves the backend
            # storage object in place.
            self.volume_api.delete_snapshot(context, snapshot,
                                            unmanage_only=True)
        # Not found exception will be handled at the wsgi level
        except exception.InvalidSnapshot as ex:
            raise exc.HTTPBadRequest(explanation=ex.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Snapshot_unmanage(extensions.ExtensionDescriptor):
    """Enable snapshot unmanage operation."""

    # NOTE: the docstring previously read "Enable volume unmanage
    # operation." — a copy-paste from the volume unmanage extension; this
    # extension unmanages snapshots.
    name = "SnapshotUnmanage"
    alias = "os-snapshot-unmanage"
    updated = "2014-12-31T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the unmanage action controller to the snapshots resource."""
        controller = SnapshotUnmanageController()
        extension = extensions.ControllerExtension(self, 'snapshots',
                                                   controller)
        return [extension]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/types_extra_specs.py 0000664 0000000 0000000 00000020062 15131732575 0026163 0 ustar 00root root 0000000 0000000 # Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume types extra specs extension"""
from http import HTTPStatus
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import types_extra_specs as schema
from cinder.api import validation
from cinder import context as ctxt
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.policies import type_extra_specs as policy
from cinder import rpc
from cinder.volume import volume_types
class VolumeTypeExtraSpecsController(wsgi.Controller):
    """The volume type extra specs API controller for the OpenStack API."""

    def _get_extra_specs(self, context, type_id):
        """Return ``{'extra_specs': ...}`` filtered by the caller's policy.

        Callers without READ_SENSITIVE_POLICY only see the specs listed in
        ``policy.USER_VISIBLE_EXTRA_SPECS``.
        """
        extra_specs = db.volume_type_extra_specs_get(context, type_id)
        if context.authorize(policy.READ_SENSITIVE_POLICY, fatal=False):
            specs_dict = extra_specs
        else:
            # Limit the response to contain only user visible specs.
            specs_dict = {}
            for uv_spec in policy.USER_VISIBLE_EXTRA_SPECS:
                if uv_spec in extra_specs:
                    specs_dict[uv_spec] = extra_specs[uv_spec]
        return dict(extra_specs=specs_dict)

    def _check_type(self, context, type_id):
        """Ensure the volume type exists."""
        # Not found exception will be handled at the wsgi level
        volume_types.get_volume_type(context, type_id)

    def index(self, req, type_id):
        """Returns the list of extra specs for a given volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_ALL_POLICY)
        self._check_type(context, type_id)
        return self._get_extra_specs(context, type_id)

    def _allow_update(self, context, type_id):
        """Reject spec changes while any volume still uses this type."""
        vols = db.volume_get_all(
            ctxt.get_admin_context(),
            limit=1,
            filters={'volume_type_id': type_id})
        if len(vols):
            expl = _('Volume Type is currently in use.')
            raise webob.exc.HTTPBadRequest(explanation=expl)

    def _check_cacheable(self, specs, type_id):
        """Forbid enabling 'cacheable' together with 'multiattach'.

        Raises HTTPBadRequest when the incoming specs, combined with the
        specs already stored on the type, would turn on both options.
        """
        extra_specs = volume_types.get_volume_type_extra_specs(type_id)
        # Boolean extra specs use the '<is> True' filter syntax.
        is_true = '<is> True'
        new_multiattach = specs.get('multiattach') == is_true
        new_cacheable = specs.get('cacheable') == is_true
        cur_multiattach = extra_specs.get('multiattach') == is_true
        cur_cacheable = extra_specs.get('cacheable') == is_true
        if ((new_multiattach and cur_cacheable) or
                (new_cacheable and cur_multiattach) or
                (new_multiattach and new_cacheable)):
            expl = _('cacheable cannot be set with multiattach.')
            raise webob.exc.HTTPBadRequest(explanation=expl)

    @validation.schema(schema.create)
    def create(self, req, type_id, body):
        """Create (or overwrite) extra specs for a volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.CREATE_POLICY)
        self._allow_update(context, type_id)
        self._check_type(context, type_id)

        specs = body['extra_specs']
        if 'image_service:store_id' in specs:
            image_service_store_id = specs['image_service:store_id']
            image_utils.validate_stores_id(context, image_service_store_id)

        # Check if multiattach be set with cacheable
        self._check_cacheable(specs, type_id)

        db.volume_type_extra_specs_update_or_create(context,
                                                    type_id,
                                                    specs)
        # Get created_at and updated_at for notification
        volume_type = volume_types.get_volume_type(context, type_id)
        notifier_info = dict(type_id=type_id, specs=specs,
                             created_at=volume_type['created_at'],
                             updated_at=volume_type['updated_at'])
        notifier = rpc.get_notifier('volumeTypeExtraSpecs')
        notifier.info(context, 'volume_type_extra_specs.create',
                      notifier_info)
        return body

    @validation.schema(schema.update)
    def update(self, req, type_id, id, body):
        """Update the single extra spec named by the URI."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        self._allow_update(context, type_id)
        self._check_type(context, type_id)

        # The body is keyed by the spec name from the URI.
        if id not in body:
            expl = _('Request body and URI mismatch')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        if 'image_service:store_id' in body:
            image_service_store_id = body['image_service:store_id']
            image_utils.validate_stores_id(context, image_service_store_id)
        if 'extra_specs' in body:
            specs = body['extra_specs']
            # Check if multiattach be set with cacheable
            self._check_cacheable(specs, type_id)

        db.volume_type_extra_specs_update_or_create(context,
                                                    type_id,
                                                    body)
        # Get created_at and updated_at for notification
        volume_type = volume_types.get_volume_type(context, type_id)
        notifier_info = dict(type_id=type_id, id=id,
                             created_at=volume_type['created_at'],
                             updated_at=volume_type['updated_at'])
        notifier = rpc.get_notifier('volumeTypeExtraSpecs')
        notifier.info(context,
                      'volume_type_extra_specs.update',
                      notifier_info)
        return body

    def show(self, req, type_id, id):
        """Return a single extra spec item."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_POLICY)
        self._check_type(context, type_id)
        specs = self._get_extra_specs(context, type_id)
        if id in specs['extra_specs']:
            return {id: specs['extra_specs'][id]}
        else:
            raise exception.VolumeTypeExtraSpecsNotFound(
                volume_type_id=type_id, extra_specs_key=id)

    @wsgi.response(HTTPStatus.ACCEPTED)
    def delete(self, req, type_id, id):
        """Deletes an existing extra spec."""
        context = req.environ['cinder.context']
        self._check_type(context, type_id)
        context.authorize(policy.DELETE_POLICY)
        self._allow_update(context, type_id)

        # Not found exception will be handled at the wsgi level
        db.volume_type_extra_specs_delete(context, type_id, id)

        # Get created_at and updated_at for notification
        volume_type = volume_types.get_volume_type(context, type_id)
        notifier_info = dict(type_id=type_id, id=id,
                             created_at=volume_type['created_at'],
                             updated_at=volume_type['updated_at'],
                             deleted_at=volume_type['deleted_at'])
        notifier = rpc.get_notifier('volumeTypeExtraSpecs')
        notifier.info(context,
                      'volume_type_extra_specs.delete',
                      notifier_info)
class Types_extra_specs(extensions.ExtensionDescriptor):
    """Type extra specs support."""

    name = "TypesExtraSpecs"
    alias = "os-types-extra-specs"
    updated = "2011-08-24T00:00:00+00:00"

    def get_resources(self):
        # extra_specs is a sub-resource of /types/{type_id}.
        parent = dict(member_name='type', collection_name='types')
        res = extensions.ResourceExtension(
            'extra_specs', VolumeTypeExtraSpecsController(), parent=parent)
        return [res]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/types_manage.py 0000664 0000000 0000000 00000020100 15131732575 0025064 0 ustar 00root root 0000000 0000000 # Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume types manage extension."""
from http import HTTPStatus
from oslo_utils import strutils
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_types as schema
from cinder.api import validation
from cinder.api.views import types as views_types
from cinder import exception
from cinder.i18n import _
from cinder.policies import volume_type as policy
from cinder import rpc
from cinder import utils
from cinder.volume import volume_types
class VolumeTypesManageController(wsgi.Controller):
    """The volume types API controller for the OpenStack API."""

    _view_builder_class = views_types.ViewBuilder

    @utils.if_notifications_enabled
    def _notify_volume_type_error(self, context, method, err,
                                  volume_type=None, id=None, name=None):
        """Emit an error notification for a failed volume type operation."""
        payload = dict(
            volume_types=volume_type, name=name, id=id, error_message=err)
        rpc.get_notifier('volumeType').error(context, method, payload)

    @utils.if_notifications_enabled
    def _notify_volume_type_info(self, context, method, volume_type):
        """Emit an info notification for a successful volume type change."""
        payload = dict(volume_types=volume_type)
        rpc.get_notifier('volumeType').info(context, method, payload)

    @wsgi.action("create")
    @validation.schema(schema.create)
    def _create(self, req, body):
        """Creates a new volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.CREATE_POLICY)

        vol_type = body['volume_type']
        name = vol_type['name']
        description = vol_type.get('description')
        specs = vol_type.get('extra_specs', {})
        is_public = vol_type.get('os-volume-type-access:is_public', True)
        is_public = strutils.bool_from_string(is_public, strict=True)

        try:
            volume_types.create(context,
                                name,
                                specs,
                                is_public,
                                description=description)
            vol_type = volume_types.get_volume_type_by_name(context, name)
            req.cache_resource(vol_type, name='types')
            self._notify_volume_type_info(
                context, 'volume_type.create', vol_type)
        except exception.VolumeTypeExists as err:
            self._notify_volume_type_error(
                context, 'volume_type.create', err, volume_type=vol_type)
            raise webob.exc.HTTPConflict(explanation=str(err))
        except exception.VolumeTypeNotFoundByName as err:
            self._notify_volume_type_error(
                context, 'volume_type.create', err, name=name)
            # Not found exception will be handled at the wsgi level
            raise

        return self._view_builder.show(req, vol_type)

    @wsgi.action("update")
    @validation.schema(schema.update)
    def _update(self, req, id, body):
        """Update name, description and/or is_public of a volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)

        vol_type = body['volume_type']
        description = vol_type.get('description')
        name = vol_type.get('name')
        is_public = vol_type.get('is_public')
        if is_public is not None:
            is_public = strutils.bool_from_string(is_public, strict=True)

        # If name specified, name can not be empty.
        if name and len(name.strip()) == 0:
            msg = _("Volume type name can not be empty.")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Name, description and is_public can not be None.
        # Specify one of them, or a combination thereof.
        if name is None and description is None and is_public is None:
            msg = _("Specify volume type name, description, is_public or "
                    "a combination thereof.")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            volume_types.update(context, id, name, description,
                                is_public=is_public)
            # Re-read the updated record for the response and notification.
            vol_type = volume_types.get_volume_type(context, id)
            req.cache_resource(vol_type, name='types')
            self._notify_volume_type_info(
                context, 'volume_type.update', vol_type)
        except exception.VolumeTypeNotFound as err:
            self._notify_volume_type_error(
                context, 'volume_type.update', err, id=id)
            # Not found exception will be handled at the wsgi level
            raise
        except exception.VolumeTypeExists as err:
            self._notify_volume_type_error(
                context, 'volume_type.update', err, volume_type=vol_type)
            raise webob.exc.HTTPConflict(explanation=str(err))
        except exception.VolumeTypeUpdateFailed as err:
            self._notify_volume_type_error(
                context, 'volume_type.update', err, volume_type=vol_type)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))

        return self._view_builder.show(req, vol_type)

    # NOTE: because this is registered as an action, *two* routes end up
    # calling it: 'DELETE /types/{id}' (no body argument) and
    # 'POST /types/{id}/action' with a "delete" body (passes body=...).
    # The optional, ignored 'body' parameter keeps the action route from
    # failing with "TypeError: _delete() got an unexpected keyword argument
    # 'body'" (previously an HTTP 500).
    @wsgi.action("delete")
    def _delete(self, req, id, body=None):
        """Deletes an existing volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.DELETE_POLICY)

        try:
            vol_type = volume_types.get_volume_type(context, id)
            volume_types.destroy(context, vol_type['id'])
            self._notify_volume_type_info(
                context, 'volume_type.delete', vol_type)
        except exception.VolumeTypeInUse as err:
            self._notify_volume_type_error(
                context, 'volume_type.delete', err, volume_type=vol_type)
            msg = _('Target volume type is still in use.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.VolumeTypeNotFound as err:
            self._notify_volume_type_error(
                context, 'volume_type.delete', err, id=id)
            # Not found exception will be handled at the wsgi level
            raise
        except (exception.VolumeTypeDeletionError,
                exception.VolumeTypeDefaultDeletionError) as err:
            self._notify_volume_type_error(
                context, 'volume_type.delete', err, volume_type=vol_type)
            raise webob.exc.HTTPBadRequest(explanation=err.msg)
        except exception.VolumeTypeDefaultMisconfiguredError as err:
            self._notify_volume_type_error(
                context, 'volume_type.delete', err, volume_type=vol_type)
            raise webob.exc.HTTPInternalServerError(explanation=err.msg)

        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Types_manage(extensions.ExtensionDescriptor):
    """Types manage support."""

    name = "TypesManage"
    alias = "os-types-manage"
    updated = "2011-08-24T00:00:00+00:00"

    def get_controller_extensions(self):
        # The manage actions piggyback on the core 'types' resource.
        ext = extensions.ControllerExtension(
            self, 'types', VolumeTypesManageController())
        return [ext]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/used_limits.py 0000664 0000000 0000000 00000005276 15131732575 0024752 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.policies import limits as policy
from cinder import quota
QUOTAS = quota.QUOTAS
class UsedLimitsController(wsgi.Controller):
    """Extends the limits response with per-project usage counters."""

    @wsgi.extends
    def index(self, req, resp_obj):
        context = req.environ['cinder.context']
        # Without the policy this extension silently adds nothing.
        if not context.authorize(
                policy.EXTEND_LIMIT_ATTRIBUTE_POLICY, fatal=False):
            return

        filters = req.params.copy()
        req_version = req.api_version_request
        # TODO(wangxiyuan): Support "tenant_id" here to keep the backwards
        # compatibility. Remove it once we drop all support for "tenant".
        if (req_version.matches(None, mv.GROUP_REPLICATION) or
                not context.is_admin):
            filters.pop('project_id', None)
            filters.pop('tenant_id', None)
        project_id = filters.get(
            'project_id', filters.get('tenant_id', context.project_id))
        quotas = QUOTAS.get_project_quotas(context, project_id,
                                           usages=True)

        # Response key -> quota resource name.
        quota_map = {
            'totalVolumesUsed': 'volumes',
            'totalGigabytesUsed': 'gigabytes',
            'totalSnapshotsUsed': 'snapshots',
            'totalBackupsUsed': 'backups',
            'totalBackupGigabytesUsed': 'backup_gigabytes'
        }
        used_limits = {
            display_name: quotas[resource]['in_use']
            for display_name, resource in quota_map.items()
            if resource in quotas
        }

        resp_obj.obj['limits']['absolute'].update(used_limits)
class Used_limits(extensions.ExtensionDescriptor):
    """Provide data on limited resources that are being used."""

    name = "UsedLimits"
    alias = 'os-used-limits'
    updated = "2013-10-03T00:00:00+00:00"

    def get_controller_extensions(self):
        # Hook the usage decorator onto the core 'limits' resource.
        ext = extensions.ControllerExtension(
            self, 'limits', UsedLimitsController())
        return [ext]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_actions.py 0000664 0000000 0000000 00000042334 15131732575 0025454 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from castellan import key_manager
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import strutils
import webob
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_actions as schema
from cinder.api import validation
from cinder import exception
from cinder.i18n import _
from cinder.image import glance
from cinder.policies import volume_actions as policy
from cinder import volume
from cinder.volume import volume_utils
CONF = cfg.CONF
class VolumeActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
    super(VolumeActionsController, self).__init__(*args, **kwargs)
    self.volume_api = volume.API()
    # The key manager is built lazily by the _key_manager property.
    self._key_mgr = None
@property
def _key_manager(self):
    """Castellan key manager, created on first access and cached."""
    mgr = self._key_mgr
    if mgr is None:
        mgr = self._key_mgr = key_manager.API(CONF)
    return mgr
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-attach')
@validation.schema(schema.attach)
def _attach(self, req, id, body):
    """Add attachment metadata."""
    context = req.environ['cinder.context']
    # Not found exception will be handled at the wsgi level
    vol = self.volume_api.get(context, id)

    params = body['os-attach']
    # instance_uuid and host_name are optional; .get() yields None when
    # absent, matching the previous explicit membership checks.
    instance_uuid = params.get('instance_uuid')
    host_name = params.get('host_name')
    mountpoint = params['mountpoint']
    mode = params.get('mode', 'rw')

    try:
        self.volume_api.attach(context, vol,
                               instance_uuid, host_name, mountpoint, mode)
    except messaging.RemoteError as error:
        if error.exc_type in ('InvalidVolume', 'InvalidUUID',
                              'InvalidVolumeAttachMode'):
            msg = _("Error attaching volume - %(err_type)s: "
                    "%(err_msg)s") % {
                'err_type': error.exc_type, 'err_msg': error.value}
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # Attach can also fail on db or volume driver errors; those should
        # not be exposed to the user, so let them surface as a 500.
        raise
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-detach')
@validation.schema(schema.detach)
def _detach(self, req, id, body):
    """Clear attachment metadata."""
    context = req.environ['cinder.context']
    # Not found exception will be handled at the wsgi level
    vol = self.volume_api.get(context, id)
    attachment_id = body['os-detach'].get('attachment_id', None)

    try:
        self.volume_api.detach(context, vol, attachment_id)
    except messaging.RemoteError as error:
        if error.exc_type in ('VolumeAttachmentNotFound', 'InvalidVolume'):
            msg = _("Error detaching volume - %(err_type)s: "
                    "%(err_msg)s") % {
                'err_type': error.exc_type, 'err_msg': error.value}
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # Detach can also fail on db or volume driver errors; those should
        # not be exposed to the user, so let them surface as a 500.
        raise
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-reserve')
@validation.schema(schema.reserve)
def _reserve(self, req, id, body):
    """Mark volume as reserved."""
    context = req.environ['cinder.context']
    # Not found exception will be handled at the wsgi level
    vol = self.volume_api.get(context, id)
    self.volume_api.reserve_volume(context, vol)
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-unreserve')
@validation.schema(schema.unreserve)
def _unreserve(self, req, id, body):
    """Unmark volume as reserved."""
    context = req.environ['cinder.context']
    # Not found exception will be handled at the wsgi level
    vol = self.volume_api.get(context, id)
    self.volume_api.unreserve_volume(context, vol)
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-begin_detaching')
@validation.schema(schema.begin_detaching)
def _begin_detaching(self, req, id, body):
    """Update volume status to 'detaching'."""
    context = req.environ['cinder.context']
    # Not found exception will be handled at the wsgi level
    vol = self.volume_api.get(context, id)
    self.volume_api.begin_detaching(context, vol)
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-roll_detaching')
@validation.schema(schema.roll_detaching)
def _roll_detaching(self, req, id, body):
    """Roll back volume status to 'in-use'."""
    context = req.environ['cinder.context']
    # Not found exception will be handled at the wsgi level
    vol = self.volume_api.get(context, id)
    self.volume_api.roll_detaching(context, vol)
@wsgi.action('os-initialize_connection')
@validation.schema(schema.initialize_connection)
def _initialize_connection(self, req, id, body):
    """Initialize volume attachment."""
    context = req.environ['cinder.context']
    # Not found exception will be handled at the wsgi level
    vol = self.volume_api.get(context, id)
    connector = body['os-initialize_connection']['connector']

    try:
        info = self.volume_api.initialize_connection(context, vol,
                                                     connector)
    except exception.InvalidInput as err:
        raise webob.exc.HTTPBadRequest(explanation=err.msg)
    except exception.VolumeBackendAPIException:
        msg = _("Unable to fetch connection information from backend.")
        raise webob.exc.HTTPInternalServerError(explanation=msg)
    except messaging.RemoteError as error:
        # Re-raise a remote InvalidInput as the local exception type so
        # it is handled like the local case above.
        if error.exc_type == 'InvalidInput':
            raise exception.InvalidInput(reason=error.value)
        raise

    info['enforce_multipath'] = connector.get('enforce_multipath', False)
    return {'connection_info': info}
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-terminate_connection')
@validation.schema(schema.terminate_connection)
def _terminate_connection(self, req, id, body):
"""Terminate volume attachment."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, id)
connector = body['os-terminate_connection']['connector']
try:
self.volume_api.terminate_connection(context, volume, connector)
except exception.VolumeBackendAPIException:
msg = _("Unable to terminate volume connection from backend.")
raise webob.exc.HTTPInternalServerError(explanation=msg)
    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-volume_upload_image')
    @validation.schema(schema.volume_upload_image, mv.BASE_VERSION,
                       mv.get_prior_version(mv.UPLOAD_IMAGE_PARAMS))
    @validation.schema(schema.volume_upload_image_v31,
                       mv.UPLOAD_IMAGE_PARAMS)
    def _volume_upload_image(self, req, id, body):
        """Uploads the specified volume to image service."""
        context = req.environ['cinder.context']
        params = body['os-volume_upload_image']
        req_version = req.api_version_request
        # 'force' may arrive as a string; strict parsing rejects garbage.
        force = params.get('force', 'False')
        force = strutils.bool_from_string(force, strict=True)
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, id)
        context.authorize(policy.UPLOAD_IMAGE_POLICY)
        disk_format = params.get("disk_format", "raw")
        image_metadata = {"container_format": params.get(
            "container_format", "bare"),
            "disk_format": disk_format,
            "name": params["image_name"]}
        if volume.encryption_key_id:
            # encrypted volumes cannot be converted on upload
            if (image_metadata['disk_format'] != 'raw'
                    or image_metadata['container_format'] != 'bare'):
                msg = _("An encrypted volume uploaded as an image must use "
                        "'raw' disk_format and 'bare' container_format, "
                        "which are the defaults for these options.")
                raise webob.exc.HTTPBadRequest(explanation=msg)
            # Clone volume encryption key: the current key cannot
            # be reused because it will be deleted when the volume is
            # deleted.
            encryption_key_id = volume_utils.clone_encryption_key(
                context, self._key_manager, volume.encryption_key_id)
            image_metadata['cinder_encryption_key_id'] = encryption_key_id
            image_metadata['cinder_encryption_key_deletion_policy'] = \
                'on_image_deletion'
        # visibility/protected were added by the UPLOAD_IMAGE_PARAMS
        # microversion; older requests never carry them.
        if req_version >= mv.get_api_version(
                mv.UPLOAD_IMAGE_PARAMS):
            image_metadata['visibility'] = params.get('visibility', 'private')
            image_metadata['protected'] = strutils.bool_from_string(
                params.get('protected', 'False'), strict=True)
            if image_metadata['visibility'] == 'public':
                context.authorize(policy.UPLOAD_PUBLIC_POLICY)
        try:
            response = self.volume_api.copy_volume_to_image(context,
                                                            volume,
                                                            image_metadata,
                                                            force)
        except exception.InvalidVolume as error:
            raise webob.exc.HTTPBadRequest(explanation=error.msg)
        except ValueError as error:
            raise webob.exc.HTTPBadRequest(explanation=str(error))
        except messaging.RemoteError as error:
            msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
                                                 'err_msg': error.value}
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except Exception as error:
            # NOTE(review): this broad catch maps any unexpected failure to
            # 400 and may leak internal error text to the caller — confirm
            # this is intentional.
            raise webob.exc.HTTPBadRequest(explanation=str(error))
        return {'os-volume_upload_image': response}
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-extend')
@validation.schema(schema.extend)
def _extend(self, req, id, body):
"""Extend size of volume."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, id)
size = int(body['os-extend']['new_size'])
try:
if (req_version.matches(mv.VOLUME_EXTEND_INUSE) and
volume.status in ['in-use']):
self.volume_api.extend_attached_volume(context, volume, size)
else:
self.volume_api.extend(context, volume, size)
except exception.InvalidVolume as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-update_readonly_flag')
@validation.schema(schema.volume_readonly_update)
def _volume_readonly_update(self, req, id, body):
"""Update volume readonly flag."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, id)
readonly_flag = body['os-update_readonly_flag']['readonly']
readonly_flag = strutils.bool_from_string(readonly_flag,
strict=True)
self.volume_api.update_readonly_flag(context, volume, readonly_flag)
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-retype')
@validation.schema(schema.retype)
def _retype(self, req, id, body):
"""Change type of existing volume."""
context = req.environ['cinder.context']
volume = self.volume_api.get(context, id)
new_type = body['os-retype']['new_type']
policy = body['os-retype'].get('migration_policy')
self.volume_api.retype(context, volume, new_type, policy)
@wsgi.response(HTTPStatus.OK)
@wsgi.action('os-set_bootable')
@validation.schema(schema.set_bootable)
def _set_bootable(self, req, id, body):
"""Update bootable status of a volume."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, id)
bootable = strutils.bool_from_string(
body['os-set_bootable']['bootable'], strict=True)
update_dict = {'bootable': bootable}
self.volume_api.update(context, volume, update_dict)
def _get_image_snapshot_and_check_size(self, context, image_uuid,
volume_size):
image_snapshot = None
if image_uuid:
image_service = glance.get_default_image_service()
image_meta = image_service.show(context, image_uuid)
if image_meta is not None:
bdms = image_meta.get('properties', {}).get(
'block_device_mapping', [])
if bdms:
boot_bdm = [bdm for bdm in bdms if (
bdm.get('source_type') == 'snapshot' and
bdm.get('boot_index') == 0)]
if boot_bdm:
try:
# validate size
image_snap_size = boot_bdm[0].get('volume_size')
if image_snap_size > volume_size:
msg = (_(
"Volume size must be greater than the "
"image size. (Image: %(img_size)s, "
"Volume: %(vol_size)s).") % {
'img_size': image_snap_size,
'vol_size': volume_size})
raise webob.exc.HTTPBadRequest(explanation=msg)
image_snapshot = self.volume_api.get_snapshot(
context, boot_bdm[0].get('snapshot_id'))
except exception.NotFound:
explanation = _(
'Nova specific image is found, but boot '
'volume snapshot id:%s not found.'
) % boot_bdm[0].get('snapshot_id')
raise webob.exc.HTTPNotFound(
explanation=explanation)
return image_snapshot
@wsgi.Controller.api_version(mv.SUPPORT_REIMAGE_VOLUME)
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.action('os-reimage')
@validation.schema(schema.reimage, mv.SUPPORT_REIMAGE_VOLUME)
def _reimage(self, req, id, body):
"""Re-image a volume with specific image."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, id)
params = body['os-reimage']
reimage_reserved = params.get('reimage_reserved', 'False')
reimage_reserved = strutils.bool_from_string(reimage_reserved,
strict=True)
image_id = params['image_id']
image_snap = self._get_image_snapshot_and_check_size(
context, image_id, volume.size)
try:
self.volume_api.reimage(context, volume, image_id,
reimage_reserved, image_snap)
except exception.InvalidVolume as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
class Volume_actions(extensions.ExtensionDescriptor):
    """Enable volume actions."""
    name = "VolumeActions"
    alias = "os-volume-actions"
    updated = "2012-05-31T00:00:00+00:00"
    def get_controller_extensions(self):
        """Attach the volume actions controller to the volumes resource."""
        return [extensions.ControllerExtension(
            self, 'volumes', VolumeActionsController())]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_encryption_metadata.py 0000664 0000000 0000000 00000004222 15131732575 0030040 0 ustar 00root root 0000000 0000000 # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume encryption metadata extension."""
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import db
from cinder import objects
from cinder.policies import volumes as policy
class VolumeEncryptionMetadataController(wsgi.Controller):
    """The volume encryption metadata API extension."""
    def index(self, req, volume_id):
        """Returns the encryption metadata for a given volume."""
        context = req.environ['cinder.context']
        # Load the volume first so the policy check can target it.
        volume = objects.Volume.get_by_id(context, volume_id)
        context.authorize(policy.ENCRYPTION_METADATA_POLICY,
                          target_obj=volume)
        return db.volume_encryption_metadata_get(context, volume_id)
    def show(self, req, volume_id, id):
        """Return a single encryption item."""
        # Reuse index() so the same policy check applies to item reads.
        encryption_item = self.index(req, volume_id)
        if encryption_item is not None:
            # NOTE(review): an unknown key raises KeyError here and surfaces
            # as a 500 — confirm whether a 404 would be more appropriate.
            return encryption_item[id]
        else:
            return None
class Volume_encryption_metadata(extensions.ExtensionDescriptor):
    """Volume encryption metadata retrieval support."""
    name = "VolumeEncryptionMetadata"
    alias = "os-volume-encryption-metadata"
    updated = "2013-07-10T00:00:00+00:00"
    def get_resources(self):
        """Expose /volumes/{id}/encryption as a sub-resource."""
        encryption_res = extensions.ResourceExtension(
            'encryption', VolumeEncryptionMetadataController(),
            parent=dict(member_name='volume', collection_name='volumes'))
        return [encryption_res]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_host_attribute.py 0000664 0000000 0000000 00000003703 15131732575 0027051 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.policies import volumes as policy
class VolumeHostAttributeController(wsgi.Controller):
    """Injects the backend host into volume show/detail responses."""
    def _add_volume_host_attribute(self, req, resp_volume):
        # The DB volume is cached on the request by the core controller.
        db_volume = req.get_db_volume(resp_volume['id'])
        attr_key = "%s:host" % Volume_host_attribute.alias
        resp_volume[attr_key] = db_volume['host']
    @wsgi.extends
    def show(self, req, resp_obj, id):
        ctxt = req.environ['cinder.context']
        if ctxt.authorize(policy.HOST_ATTRIBUTE_POLICY, fatal=False):
            self._add_volume_host_attribute(req, resp_obj.obj['volume'])
    @wsgi.extends
    def detail(self, req, resp_obj):
        ctxt = req.environ['cinder.context']
        if ctxt.authorize(policy.HOST_ATTRIBUTE_POLICY, fatal=False):
            for vol in list(resp_obj.obj['volumes']):
                self._add_volume_host_attribute(req, vol)
class Volume_host_attribute(extensions.ExtensionDescriptor):
    """Expose host as an attribute of a volume."""
    name = "VolumeHostAttribute"
    alias = "os-vol-host-attr"
    updated = "2011-11-03T00:00:00+00:00"
    def get_controller_extensions(self):
        """Attach the host-attribute controller to the volumes resource."""
        return [extensions.ControllerExtension(
            self, 'volumes', VolumeHostAttributeController())]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_image_metadata.py 0000664 0000000 0000000 00000014767 15131732575 0026747 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Volume Image Metadata API extension."""
from http import HTTPStatus
from oslo_log import log as logging
import webob
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_image_metadata as schema
from cinder.api import validation
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import volume_metadata as policy
from cinder import volume
LOG = logging.getLogger(__name__)
class VolumeImageMetadataController(wsgi.Controller):
    """Controller extension exposing Glance image metadata on volumes."""
    def __init__(self, *args, **kwargs):
        super(VolumeImageMetadataController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()
    def _get_image_metadata(self, context, volume_id):
        """Return a (volume, image_metadata_dict) tuple for volume_id."""
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, volume_id)
        meta = self.volume_api.get_volume_image_metadata(context, volume)
        return (volume, meta)
    def _add_image_metadata(self, context, resp_volume_list, image_metas=None):
        """Appends the image metadata to each of the given volume.
        :param context: the request context
        :param resp_volume_list: the response volume list
        :param image_metas: The image metadata to append, if None is provided
                            it will be retrieved from the database. An empty
                            dict means there is no metadata and it should not
                            be retrieved from the db.
        """
        vol_id_list = []
        for vol in resp_volume_list:
            vol_id_list.append(vol['id'])
        if image_metas is None:
            try:
                image_metas = self.volume_api.get_list_volumes_image_metadata(
                    context, vol_id_list)
            except Exception as e:
                # Best-effort: a failure to fetch image metadata must not
                # break the volume listing itself.
                LOG.debug('Get image metadata error: %s', e)
                return
        if image_metas:
            for vol in resp_volume_list:
                image_meta = image_metas.get(vol['id'])
                if image_meta:
                    vol['volume_image_metadata'] = dict(image_meta)
    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Add image metadata to a volume 'show' response."""
        context = req.environ['cinder.context']
        if context.authorize(policy.IMAGE_METADATA_SHOW_POLICY, fatal=False):
            self._add_image_metadata(context, [resp_obj.obj['volume']])
    @wsgi.extends
    def detail(self, req, resp_obj):
        """Add image metadata to each volume in a 'detail' response."""
        context = req.environ['cinder.context']
        if context.authorize(policy.IMAGE_METADATA_SHOW_POLICY, fatal=False):
            # Just get the image metadata of those volumes in response.
            volumes = list(resp_obj.obj.get('volumes', []))
            if volumes:
                self._add_image_metadata(context, volumes)
    @wsgi.action("os-set_image_metadata")
    @validation.schema(schema.set_image_metadata)
    def create(self, req, id, body):
        """Set (merge) image metadata entries on a volume."""
        context = req.environ['cinder.context']
        volume = objects.Volume.get_by_id(context, id)
        if context.authorize(policy.IMAGE_METADATA_SET_POLICY,
                             target_obj=volume):
            metadata = body['os-set_image_metadata']['metadata']
            new_metadata = self._update_volume_image_metadata(context,
                                                              id,
                                                              metadata,
                                                              delete=False)
            return {'metadata': new_metadata}
    def _update_volume_image_metadata(self, context,
                                      volume_id,
                                      metadata,
                                      delete=False):
        """Update image-type metadata on a volume.
        :param delete: when True, replace existing metadata instead of
                       merging (passed through to the volume API).
        """
        try:
            volume = self.volume_api.get(context, volume_id)
            return self.volume_api.update_volume_metadata(
                context,
                volume,
                metadata,
                # Fix: honor the caller's flag; it was previously hardcoded
                # to False, silently ignoring the 'delete' parameter.
                delete=delete,
                meta_type=common.METADATA_TYPES.image)
        # Not found exception will be handled at the wsgi level
        except (ValueError, AttributeError):
            msg = _("Malformed request body.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.InvalidVolumeMetadata as error:
            raise webob.exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidVolumeMetadataSize as error:
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg)
    @wsgi.action("os-show_image_metadata")
    @validation.schema(schema.index)
    def index(self, req, id, body):
        """Return the image metadata for a volume."""
        context = req.environ['cinder.context']
        return {'metadata': self._get_image_metadata(context, id)[1]}
    @wsgi.action("os-unset_image_metadata")
    @validation.schema(schema.unset_image_metadata)
    def delete(self, req, id, body):
        """Deletes an existing image metadata."""
        context = req.environ['cinder.context']
        volume = objects.Volume.get_by_id(context, id)
        if context.authorize(policy.IMAGE_METADATA_REMOVE_POLICY,
                             target_obj=volume):
            key = body['os-unset_image_metadata']['key']
            vol, metadata = self._get_image_metadata(context, id)
            if key not in metadata:
                raise exception.GlanceMetadataNotFound(id=id)
            self.volume_api.delete_volume_metadata(
                context, vol, key,
                meta_type=common.METADATA_TYPES.image)
        return webob.Response(status_int=HTTPStatus.OK)
class Volume_image_metadata(extensions.ExtensionDescriptor):
    """Show image metadata associated with the volume."""
    name = "VolumeImageMetadata"
    alias = "os-vol-image-meta"
    updated = "2012-12-07T00:00:00+00:00"
    def get_controller_extensions(self):
        """Attach the image-metadata controller to the volumes resource."""
        return [extensions.ControllerExtension(
            self, 'volumes', VolumeImageMetadataController())]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_manage.py 0000664 0000000 0000000 00000017712 15131732575 0025246 0 ustar 00root root 0000000 0000000 # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import strutils
import webob
from cinder.api import api_utils
from cinder.api import common
from cinder.api.contrib import resource_common_manage
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_manage as schema
from cinder.api.v3.views import volumes as volume_views
from cinder.api import validation
from cinder.api.views import manageable_volumes as list_manageable_view
from cinder import exception
from cinder.i18n import _
from cinder.policies import manageable_volumes as policy
from cinder import volume as cinder_volume
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class VolumeManageController(wsgi.Controller):
    """The /os-volume-manage controller for the OpenStack API."""
    _view_builder_class = volume_views.ViewBuilder
    def __init__(self, *args, **kwargs):
        super(VolumeManageController, self).__init__(*args, **kwargs)
        self.volume_api = cinder_volume.API()
        self._list_manageable_view = list_manageable_view.ViewBuilder()
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.volume_manage_create, mv.BASE_VERSION,
                       mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER))
    @validation.schema(schema.volume_manage_create_v316,
                       mv.VOLUME_MIGRATE_CLUSTER)
    def create(self, req, body):
        """Instruct Cinder to manage a storage object.
        Manages an existing backend storage object (e.g. a Linux logical
        volume or a SAN disk) by creating the Cinder objects required to manage
        it, and possibly renaming the backend storage object
        (driver dependent)
        From an API perspective, this operation behaves very much like a
        volume creation operation, except that properties such as image,
        snapshot and volume references don't make sense, because we are taking
        an existing storage object into Cinder management.
        Required HTTP Body:
        .. code-block:: json
            {
                "volume": {
                    "host": "",
                    "cluster": "",
                    "ref": ""
                }
            }
        See the appropriate Cinder drivers' implementations of the
        manage_volume method to find out the accepted format of 'ref'.
        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'host' element refers to a
        cinder host that is not registered.
        The volume will later enter the error state if it is discovered that
        'ref' is bad.
        Optional elements to 'volume' are::
            name               A name for the new volume.
            description        A description for the new volume.
            volume_type        ID or name of a volume type to associate with
                               the new Cinder volume. Does not necessarily
                               guarantee that the managed volume will have the
                               properties described in the volume_type. The
                               driver may choose to fail if it identifies that
                               the specified volume_type is not compatible with
                               the backend storage object.
            metadata           Key/value pairs to be associated with the new
                               volume.
            availability_zone  The availability zone to associate with the new
                               volume.
            bootable           If set to True, marks the volume as bootable.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.MANAGE_POLICY)
        volume = body['volume']
        # 'cluster' is only honored from the VOLUME_MIGRATE_CLUSTER
        # microversion onward; get_cluster_host enforces that.
        cluster_name, host = common.get_cluster_host(
            req, volume, mv.VOLUME_MIGRATE_CLUSTER)
        LOG.debug('Manage volume request body: %s', body)
        kwargs = {}
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            try:
                kwargs['volume_type'] = volume_types.get_by_name_or_id(
                    context, req_volume_type)
            except exception.VolumeTypeNotFound:
                msg = _("Cannot find requested '%s' "
                        "volume type") % req_volume_type
                raise exception.InvalidVolumeType(reason=msg)
        else:
            kwargs['volume_type'] = {}
        if volume.get('name'):
            kwargs['name'] = volume.get('name').strip()
        if volume.get('description'):
            kwargs['description'] = volume.get('description').strip()
        kwargs['metadata'] = volume.get('metadata', None)
        kwargs['availability_zone'] = volume.get('availability_zone', None)
        bootable = volume.get('bootable', False)
        kwargs['bootable'] = strutils.bool_from_string(bootable, strict=True)
        try:
            new_volume = self.volume_api.manage_existing(context,
                                                         host,
                                                         cluster_name,
                                                         volume['ref'],
                                                         **kwargs)
        except exception.ServiceNotFound:
            msg = _("%(name)s '%(value)s' not found") % {
                'name': 'Host' if host else 'Cluster',
                'value': host or cluster_name}
            raise exception.ServiceUnavailable(message=msg)
        except exception.VolumeTypeDefaultMisconfiguredError as err:
            raise webob.exc.HTTPInternalServerError(explanation=err.msg)
        api_utils.add_visible_admin_metadata(new_volume)
        # FIXME: This should be respecting microversions but it doesn't, which
        # means we're missing many fields added in recent microversions. We
        # should address this with a new microversion
        return self._view_builder.legacy_detail(req, new_volume)
    @wsgi.extends
    def index(self, req):
        """Returns a summary list of volumes available to manage."""
        context = req.environ['cinder.context']
        context.authorize(policy.LIST_MANAGEABLE_POLICY)
        return resource_common_manage.get_manageable_resources(
            req, False, self.volume_api.get_manageable_volumes,
            self._list_manageable_view)
    @wsgi.extends
    def detail(self, req):
        """Returns a detailed list of volumes available to manage."""
        context = req.environ['cinder.context']
        context.authorize(policy.LIST_MANAGEABLE_POLICY)
        return resource_common_manage.get_manageable_resources(
            req, True, self.volume_api.get_manageable_volumes,
            self._list_manageable_view)
class Volume_manage(extensions.ExtensionDescriptor):
    """Allows existing backend storage to be 'managed' by Cinder."""
    name = 'VolumeManage'
    alias = 'os-volume-manage'
    updated = '2014-02-10T00:00:00+00:00'
    def get_resources(self):
        """Expose the os-volume-manage resource with a 'detail' action."""
        manage_res = extensions.ResourceExtension(
            Volume_manage.alias,
            VolumeManageController(),
            collection_actions={'detail': 'GET'})
        return [manage_res]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_mig_status_attribute.py 0000664 0000000 0000000 00000004130 15131732575 0030246 0 ustar 00root root 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.policies import volumes as policy
class VolumeMigStatusAttributeController(wsgi.Controller):
    """Injects migration status fields into volume responses."""
    def _add_volume_mig_status_attribute(self, req, resp_volume):
        # The DB volume is cached on the request by the core controller.
        db_volume = req.get_db_volume(resp_volume['id'])
        prefix = Volume_mig_status_attribute.alias
        resp_volume["%s:migstat" % prefix] = db_volume['migration_status']
        resp_volume["%s:name_id" % prefix] = db_volume['_name_id']
    @wsgi.extends
    def show(self, req, resp_obj, id):
        ctxt = req.environ['cinder.context']
        if ctxt.authorize(policy.MIG_ATTRIBUTE_POLICY, fatal=False):
            self._add_volume_mig_status_attribute(req, resp_obj.obj['volume'])
    @wsgi.extends
    def detail(self, req, resp_obj):
        ctxt = req.environ['cinder.context']
        if ctxt.authorize(policy.MIG_ATTRIBUTE_POLICY, fatal=False):
            for vol in list(resp_obj.obj['volumes']):
                self._add_volume_mig_status_attribute(req, vol)
class Volume_mig_status_attribute(extensions.ExtensionDescriptor):
    """Expose migration_status as an attribute of a volume."""
    name = "VolumeMigStatusAttribute"
    alias = "os-vol-mig-status-attr"
    updated = "2013-08-08T00:00:00+00:00"
    def get_controller_extensions(self):
        """Attach the migration-status controller to the volumes resource."""
        return [extensions.ControllerExtension(
            self, 'volumes', VolumeMigStatusAttributeController())]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_tenant_attribute.py 0000664 0000000 0000000 00000003767 15131732575 0027377 0 ustar 00root root 0000000 0000000 # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.policies import volumes as policy
class VolumeTenantAttributeController(wsgi.Controller):
    """Injects the owning project id into volume responses."""
    def _add_volume_tenant_attribute(self, req, resp_volume):
        # The DB volume is cached on the request by the core controller.
        db_volume = req.get_db_volume(resp_volume['id'])
        attr_key = "%s:tenant_id" % Volume_tenant_attribute.alias
        resp_volume[attr_key] = db_volume['project_id']
    @wsgi.extends
    def show(self, req, resp_obj, id):
        ctxt = req.environ['cinder.context']
        if ctxt.authorize(policy.TENANT_ATTRIBUTE_POLICY, fatal=False):
            self._add_volume_tenant_attribute(req, resp_obj.obj['volume'])
    @wsgi.extends
    def detail(self, req, resp_obj):
        ctxt = req.environ['cinder.context']
        if ctxt.authorize(policy.TENANT_ATTRIBUTE_POLICY, fatal=False):
            for vol in list(resp_obj.obj['volumes']):
                self._add_volume_tenant_attribute(req, vol)
class Volume_tenant_attribute(extensions.ExtensionDescriptor):
    """Expose the internal project_id as an attribute of a volume."""
    name = "VolumeTenantAttribute"
    alias = "os-vol-tenant-attr"
    updated = "2011-11-03T00:00:00+00:00"
    def get_controller_extensions(self):
        """Attach the tenant-attribute controller to the volumes resource."""
        return [extensions.ControllerExtension(
            self, 'volumes', VolumeTenantAttributeController())]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_transfer.py 0000664 0000000 0000000 00000013645 15131732575 0025643 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_transfer as schema
from cinder.api import validation
from cinder.api.views import transfers as transfer_view
from cinder import exception
from cinder import transfer as transferAPI
LOG = logging.getLogger(__name__)
class VolumeTransferController(wsgi.Controller):
    """The Volume Transfer API controller for the OpenStack API."""
    _view_builder_class = transfer_view.ViewBuilder
    def __init__(self):
        self.transfer_api = transferAPI.API()
        super(VolumeTransferController, self).__init__()
    def show(self, req, id):
        """Return data about active transfers."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        transfer = self.transfer_api.get(context, transfer_id=id)
        return self._view_builder.detail(req, transfer)
    def index(self, req):
        """Returns a summary list of transfers."""
        return self._get_transfers(req, is_detail=False)
    def detail(self, req):
        """Returns a detailed list of transfers."""
        return self._get_transfers(req, is_detail=True)
    def _get_transfers(self, req, is_detail):
        """Returns a list of transfers, transformed through view builder."""
        context = req.environ['cinder.context']
        filters = req.params.copy()
        LOG.debug('Listing volume transfers')
        # The public filter key is 'name'; the DB column is 'display_name'.
        if 'name' in filters:
            filters['display_name'] = filters.pop('name')
        transfers = self.transfer_api.get_all(context, filters=filters,
                                              sort_keys=['created_at', 'id'],
                                              sort_dirs=['asc', 'asc'])
        transfer_count = len(transfers)
        # Apply the request's limit/offset pagination after counting.
        limited_list = common.limited(transfers, req)
        if is_detail:
            transfers = self._view_builder.detail_list(req, limited_list,
                                                       transfer_count)
        else:
            transfers = self._view_builder.summary_list(req, limited_list,
                                                        transfer_count)
        return transfers
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Create a new volume transfer."""
        LOG.debug('Creating new volume transfer %s', body)
        context = req.environ['cinder.context']
        transfer = body['transfer']
        volume_id = transfer['volume_id']
        name = transfer.get('name', None)
        if name is not None:
            name = name.strip()
        LOG.info("Creating transfer of volume %s",
                 volume_id)
        try:
            new_transfer = self.transfer_api.create(context, volume_id, name,
                                                    no_snapshots=False)
        # Not found exception will be handled at the wsgi level
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        transfer = self._view_builder.create(req,
                                             dict(new_transfer))
        return transfer
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.accept)
    def accept(self, req, id, body):
        """Accept a new volume transfer."""
        transfer_id = id
        LOG.debug('Accepting volume transfer %s', transfer_id)
        context = req.environ['cinder.context']
        accept = body['accept']
        auth_key = accept['auth_key']
        LOG.info("Accepting transfer %s", transfer_id)
        try:
            accepted_transfer = self.transfer_api.accept(context, transfer_id,
                                                         auth_key)
        except exception.VolumeSizeExceedsAvailableQuota as error:
            # Quota exhaustion -> 413 with a Retry-After hint.
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.msg, headers={'Retry-After': '0'})
        except exception.InvalidVolume as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        transfer = \
            self._view_builder.summary(req,
                                       dict(accepted_transfer))
        return transfer
    def delete(self, req, id):
        """Delete a transfer."""
        context = req.environ['cinder.context']
        LOG.info("Delete transfer with id: %s", id)
        # Not found exception will be handled at the wsgi level
        self.transfer_api.delete(context, transfer_id=id)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Volume_transfer(extensions.ExtensionDescriptor):
    """Volume transfer management support."""

    name = "VolumeTransfer"
    alias = "os-volume-transfer"
    updated = "2013-05-29T00:00:00+00:00"

    def get_resources(self):
        """Expose os-volume-transfer with its detail and accept routes."""
        resource = extensions.ResourceExtension(
            Volume_transfer.alias,
            VolumeTransferController(),
            collection_actions={'detail': 'GET'},
            member_actions={'accept': 'POST'})
        return [resource]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_type_access.py 0000664 0000000 0000000 00000013071 15131732575 0026312 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume type access extension."""
from http import HTTPStatus
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_type_access as schema
from cinder.api import validation
from cinder import exception
from cinder.i18n import _
from cinder.policies import volume_access as policy
from cinder.volume import volume_types
def _marshall_volume_type_access(vol_type):
rval = []
for project_id in vol_type['projects']:
rval.append({'volume_type_id': vol_type['id'],
'project_id': project_id})
return {'volume_type_access': rval}
class VolumeTypeAccessController(object):
    """The volume type access API controller for the OpenStack API."""

    def index(self, req, type_id):
        """List the projects granted access to a private volume type.

        Public types have no access list; requesting one raises
        VolumeTypeAccessNotFound. An unknown type raises at the wsgi
        level as a 404.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.TYPE_ACCESS_WHO_POLICY)
        vol_type = volume_types.get_volume_type(
            context, type_id, expected_fields=['projects'])
        if not vol_type['is_public']:
            return _marshall_volume_type_access(vol_type)
        expl = _("Access list not available for public volume types.")
        raise exception.VolumeTypeAccessNotFound(message=expl)
class VolumeTypeActionController(wsgi.Controller):
    """The volume type access API controller for the OpenStack API.

    Extends the core /types responses with an
    "os-volume-type-access:is_public" attribute when the caller is
    authorized, and adds the addProjectAccess/removeProjectAccess
    actions.
    """

    def _extend_vol_type(self, vol_type_rval, vol_type_ref):
        # Skip extension when no cached type ref is available; a ref
        # without 'is_public' is treated as public (True).
        if vol_type_ref:
            key = "%s:is_public" % (Volume_type_access.alias)
            vol_type_rval[key] = vol_type_ref.get('is_public', True)

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Add the is_public flag to a single volume type response."""
        context = req.environ['cinder.context']
        if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False):
            vol_type = req.cached_resource_by_id(id, name='types')
            self._extend_vol_type(resp_obj.obj['volume_type'], vol_type)

    @wsgi.extends
    def index(self, req, resp_obj):
        """Add the is_public flag to each type in a list response."""
        context = req.environ['cinder.context']
        if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False):
            for vol_type_rval in list(resp_obj.obj['volume_types']):
                type_id = vol_type_rval['id']
                vol_type = req.cached_resource_by_id(type_id, name='types')
                self._extend_vol_type(vol_type_rval, vol_type)

    # TODO: remove this, there is no /types/detail call for this to extend
    @wsgi.extends
    def detail(self, req, resp_obj):
        """Duplicate of index() kept only for the legacy detail route."""
        context = req.environ['cinder.context']
        if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False):
            for vol_type_rval in list(resp_obj.obj['volume_types']):
                type_id = vol_type_rval['id']
                vol_type = req.cached_resource_by_id(type_id, name='types')
                self._extend_vol_type(vol_type_rval, vol_type)

    @wsgi.extends(action='create')
    def create(self, req, body, resp_obj):
        """Add the is_public flag to a newly created type response."""
        context = req.environ['cinder.context']
        if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False):
            type_id = resp_obj.obj['volume_type']['id']
            vol_type = req.cached_resource_by_id(type_id, name='types')
            self._extend_vol_type(resp_obj.obj['volume_type'], vol_type)

    @wsgi.action('addProjectAccess')
    @validation.schema(schema.add_project_access)
    def _addProjectAccess(self, req, id, body):
        """Grant a project access to a private volume type (202)."""
        context = req.environ['cinder.context']
        context.authorize(policy.ADD_PROJECT_POLICY)
        project = body['addProjectAccess']['project']
        try:
            volume_types.add_volume_type_access(context, id, project)
        # Not found exception will be handled at the wsgi level
        except exception.VolumeTypeAccessExists as err:
            raise webob.exc.HTTPConflict(explanation=str(err))
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.action('removeProjectAccess')
    @validation.schema(schema.remove_project_access)
    def _removeProjectAccess(self, req, id, body):
        """Revoke a project's access to a private volume type (202)."""
        context = req.environ['cinder.context']
        context.authorize(policy.REMOVE_PROJECT_POLICY)
        project = body['removeProjectAccess']['project']
        # Not found exception will be handled at the wsgi level
        volume_types.remove_volume_type_access(context, id, project)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Volume_type_access(extensions.ExtensionDescriptor):
    """Volume type access support."""

    name = "VolumeTypeAccess"
    alias = "os-volume-type-access"
    updated = "2014-06-26T00:00:00Z"

    def get_resources(self):
        """Register the per-type access sub-resource."""
        access_resource = extensions.ResourceExtension(
            Volume_type_access.alias,
            VolumeTypeAccessController(),
            parent=dict(member_name='type', collection_name='types'))
        return [access_resource]

    def get_controller_extensions(self):
        """Attach the is_public response extension to /types."""
        extension = extensions.ControllerExtension(
            self, 'types', VolumeTypeActionController())
        return [extension]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_type_encryption.py 0000664 0000000 0000000 00000014505 15131732575 0027246 0 ustar 00root root 0000000 0000000 # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume types encryption extension."""
from http import HTTPStatus
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_type_encryption as schema
from cinder.api import validation
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.policies import volume_type as policy
from cinder import rpc
from cinder.volume import volume_types
class VolumeTypeEncryptionController(wsgi.Controller):
    """The volume type encryption API controller for the OpenStack API."""

    def _get_volume_type_encryption(self, context, type_id):
        """Return the type's encryption spec as a plain dict ({} if unset)."""
        encryption_ref = db.volume_type_encryption_get(context, type_id)
        if not encryption_ref:
            return {}
        # Copy into a plain dict so callers cannot mutate the DB reference.
        return dict(encryption_ref.items())

    def _check_type(self, context, type_id):
        """Ensure the volume type exists.

        Not found exception will be handled at the wsgi level.
        """
        volume_types.get_volume_type(context, type_id)

    def _encrypted_type_in_use(self, context, volume_type_id):
        """Return True when at least one volume uses this volume type."""
        volume_list = db.volume_type_encryption_volume_get(context,
                                                           volume_type_id)
        # Idiomatic truthiness test replaces the len()/if/else pattern.
        return bool(volume_list)

    def index(self, req, type_id):
        """Returns the encryption specs for a given volume type."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_ENCRYPTION_POLICY)
        self._check_type(context, type_id)
        return self._get_volume_type_encryption(context, type_id)

    @validation.schema(schema.create)
    def create(self, req, type_id, body):
        """Create encryption specs for an existing volume type.

        :raises webob.exc.HTTPBadRequest: if the type is in use.
        :raises exception.VolumeTypeEncryptionExists: if specs already
            exist for the type.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.CREATE_ENCRYPTION_POLICY)
        key_size = body['encryption'].get('key_size')
        if key_size is not None:
            # Normalize a string key size from the API into an int.
            body['encryption']['key_size'] = int(key_size)
        if self._encrypted_type_in_use(context, type_id):
            expl = _('Cannot create encryption specs. Volume type in use.')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        self._check_type(context, type_id)
        encryption_specs = self._get_volume_type_encryption(context, type_id)
        if encryption_specs:
            raise exception.VolumeTypeEncryptionExists(type_id=type_id)
        encryption_specs = body['encryption']
        db.volume_type_encryption_create(context, type_id, encryption_specs)
        notifier_info = dict(type_id=type_id, specs=encryption_specs)
        notifier = rpc.get_notifier('volumeTypeEncryption')
        notifier.info(context, 'volume_type_encryption.create', notifier_info)
        return body

    @validation.schema(schema.update)
    def update(self, req, type_id, id, body):
        """Update encryption specs for a given volume type.

        :raises webob.exc.HTTPBadRequest: if the type is in use.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_ENCRYPTION_POLICY)
        key_size = body['encryption'].get('key_size')
        if key_size is not None:
            body['encryption']['key_size'] = int(key_size)
        self._check_type(context, type_id)
        if self._encrypted_type_in_use(context, type_id):
            expl = _('Cannot update encryption specs. Volume type in use.')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        encryption_specs = body['encryption']
        db.volume_type_encryption_update(context, type_id, encryption_specs)
        notifier_info = dict(type_id=type_id, id=id)
        notifier = rpc.get_notifier('volumeTypeEncryption')
        notifier.info(context, 'volume_type_encryption.update', notifier_info)
        return body

    def show(self, req, type_id, id):
        """Return a single encryption item."""
        context = req.environ['cinder.context']
        context.authorize(policy.GET_ENCRYPTION_POLICY)
        self._check_type(context, type_id)
        encryption_specs = self._get_volume_type_encryption(context, type_id)
        if id not in encryption_specs:
            raise exception.VolumeTypeEncryptionNotFound(type_id=type_id)
        return {id: encryption_specs[id]}

    def delete(self, req, type_id, id):
        """Delete encryption specs for a given volume type.

        :raises webob.exc.HTTPBadRequest: if the type is in use.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.DELETE_ENCRYPTION_POLICY)
        if self._encrypted_type_in_use(context, type_id):
            expl = _('Cannot delete encryption specs. Volume type in use.')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        # Not found exception will be handled at the wsgi level.
        # (Redundant else-after-raise removed.)
        db.volume_type_encryption_delete(context, type_id)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Volume_type_encryption(extensions.ExtensionDescriptor):
    """Encryption support for volume types."""

    name = "VolumeTypeEncryption"
    alias = "encryption"
    updated = "2013-07-01T00:00:00+00:00"

    def get_resources(self):
        """Register the per-type encryption sub-resource."""
        encryption_resource = extensions.ResourceExtension(
            Volume_type_encryption.alias,
            VolumeTypeEncryptionController(),
            parent=dict(member_name='type', collection_name='types'))
        return [encryption_resource]

    def get_controller_extensions(self):
        """Attach the encryption controller as a /types extension."""
        ctrl = VolumeTypeEncryptionController()
        return [extensions.ControllerExtension(self, 'types', ctrl)]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/contrib/volume_unmanage.py 0000664 0000000 0000000 00000005167 15131732575 0025612 0 ustar 00root root 0000000 0000000 # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_unmanage as schema
from cinder.api import validation
from cinder.policies import manageable_volumes as policy
from cinder import volume
LOG = logging.getLogger(__name__)
class VolumeUnmanageController(wsgi.Controller):
    """Controller implementing the os-unmanage volume action."""

    def __init__(self, *args, **kwargs):
        super(VolumeUnmanageController, self).__init__(*args, **kwargs)
        self.volume_api = volume.API()

    @wsgi.response(HTTPStatus.ACCEPTED)
    @wsgi.action('os-unmanage')
    @validation.schema(schema.unmanage)
    def unmanage(self, req, id, body):
        """Stop managing a volume.

        Behaves like a delete, except the driver is asked to forget the
        volume (unmanage) instead of removing the backing storage
        object. Returns 404 for an unknown volume and 400 when the
        volume is still attached; no parameters are required.
        """
        context = req.environ['cinder.context']
        LOG.info("Unmanage volume with id: %s", id)
        # Not found exception will be handled at the wsgi level
        volume_ref = self.volume_api.get(context, id)
        context.authorize(policy.UNMANAGE_POLICY, target_obj=volume_ref)
        self.volume_api.delete(context, volume_ref, unmanage_only=True)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
class Volume_unmanage(extensions.ExtensionDescriptor):
    """Enable volume unmanage operation."""

    name = "VolumeUnmanage"
    alias = "os-volume-unmanage"
    updated = "2012-05-31T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the os-unmanage action controller to /volumes."""
        ctrl = VolumeUnmanageController()
        return [extensions.ControllerExtension(self, 'volumes', ctrl)]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/extensions.py 0000664 0000000 0000000 00000026357 15131732575 0023173 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import webob.dec
import webob.exc
import cinder.api.openstack
from cinder.api.openstack import wsgi
from cinder import exception
import cinder.policy
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
FILES_TO_SKIP = ['resource_common_manage.py']
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Deriving from this class is optional — it is purely a convenience
    that registers the extension and supplies empty default hooks.
    """

    # Human-readable extension name, e.g. 'Fox In Socks'
    name = None

    # Short alias used in URLs and policy names, e.g. 'FOXNSOX'
    alias = None

    # ISO-8601 timestamp of the last update,
    # e.g. '2011-01-22T13:25:27-06:00'
    updated = None

    def __init__(self, ext_mgr):
        """Register this extension with the given extension manager."""
        ext_mgr.register(self)
        self.ext_mgr = ext_mgr

    def get_resources(self):
        """Return extensions.ResourceExtension objects.

        Resources define new nouns and are accessible through URLs.
        """
        return []

    def get_controller_extensions(self):
        """Return extensions.ControllerExtension objects.

        Controller extensions are used to extend existing controllers.
        """
        return []
class ExtensionsResource(wsgi.Resource):
    """API resource exposing the list of loaded extensions."""

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager
        super(ExtensionsResource, self).__init__(None)

    def _translate(self, ext):
        """Serialize one extension descriptor into its API dict."""
        return {
            'name': ext.name,
            'alias': ext.alias,
            'description': ext.__doc__,
            'updated': ext.updated,
            'links': [],  # TODO(dprince): implement extension links
        }

    def index(self, req):
        """List every loaded extension."""
        translated = [self._translate(ext)
                      for ext in self.extension_manager.extensions.values()]
        return dict(extensions=translated)

    def show(self, req, id):
        """Show one extension; its alias serves as the id (404 if unknown)."""
        try:
            ext = self.extension_manager.extensions[id]
        except KeyError:
            raise webob.exc.HTTPNotFound()
        return dict(extension=self._translate(ext))
class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See cinder/tests/api/extensions/foxinsocks/extension.py for an
    example extension implementation.
    """

    def __init__(self):
        # Extension factories come from the osapi_volume_extension
        # config option; each factory registers itself via register().
        LOG.debug('Initializing extension manager.')
        self.cls_list = CONF.osapi_volume_extension
        self.extensions = {}
        self._load_extensions()

    def is_loaded(self, alias):
        """Return True if an extension with this alias is registered."""
        return alias in self.extensions

    def register(self, ext):
        """Register an extension descriptor, keyed by its alias.

        Raises CinderException on a duplicate alias; silently ignores
        descriptors missing required attributes.
        """
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return
        alias = ext.alias
        LOG.debug('Loaded extension: %s', alias)
        if alias in self.extensions:
            raise exception.CinderException(
                "Found duplicate extension: %s" % alias)
        self.extensions[alias] = ext

    def get_resources(self):
        """Returns a list of ResourceExtension objects."""
        resources = []
        # The 'extensions' listing resource itself is always present.
        resources.append(ResourceExtension('extensions',
                                           ExtensionsResource(self)))
        for ext in self.extensions.values():
            try:
                resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have resource
                # extensions
                pass
        return resources

    def get_controller_extensions(self):
        """Returns a list of ControllerExtension objects."""
        controller_exts = []
        for ext in self.extensions.values():
            try:
                get_ext_method = ext.get_controller_extensions
            except AttributeError:
                # NOTE(Vek): Extensions aren't required to have
                # controller extensions
                continue
            controller_exts.extend(get_ext_method())
        return controller_exts

    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            # Accessing each attribute doubles as the existence check.
            LOG.debug('Ext name: %s', extension.name)
            LOG.debug('Ext alias: %s', extension.alias)
            LOG.debug('Ext description: %s',
                      ' '.join(extension.__doc__.strip().split()))
            LOG.debug('Ext updated: %s', extension.updated)
        except AttributeError:
            LOG.exception("Exception loading extension.")
            return False
        return True

    def load_extension(self, ext_factory):
        """Execute an extension factory.

        Loads an extension. The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager. The factory callable is
        expected to call the register() method at least once.
        """
        LOG.debug("Loading extension %s", ext_factory)
        # Load the factory
        factory = importutils.import_class(ext_factory)
        # Call it
        LOG.debug("Calling extension factory %s", ext_factory)
        factory(self)

    def _load_extensions(self):
        """Load extensions specified on the command line."""
        extensions = list(self.cls_list)
        for ext_factory in extensions:
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                # A broken extension must not prevent API startup.
                LOG.warning('Failed to load extension %(ext_factory)s: '
                            '%(exc)s',
                            {'ext_factory': ext_factory, 'exc': exc})
class ControllerExtension(object):
    """Extend core controllers of cinder OpenStack API.

    Provide a way to extend existing cinder OpenStack API core
    controllers.
    """

    def __init__(self, extension, collection, controller):
        """Record which controller extends which resource collection."""
        self.extension, self.collection, self.controller = (
            extension, collection, controller)
class ResourceExtension(object):
    """Add top level resources to the OpenStack API in cinder."""

    def __init__(self, collection, controller, parent=None,
                 collection_actions=None, member_actions=None,
                 custom_routes_fn=None):
        """Capture routing metadata; None action maps become empty dicts."""
        self.collection = collection
        self.controller = controller
        self.parent = parent
        self.collection_actions = collection_actions or {}
        self.member_actions = member_actions or {}
        self.custom_routes_fn = custom_routes_fn
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions.

    Walks the directory in ``path[0]`` recursively. Each module is
    expected to define an extension class named after the module with
    its first letter capitalized; subpackages exposing an ``extension``
    callable are delegated to instead of walked. Load failures are
    logged as warnings, never raised.

    :param ext_mgr: ExtensionManager the extensions register with.
    :param logger: logger for skip/failure messages.
    :param path: list whose first element is the directory to walk.
    :param package: dotted package prefix used to build class paths.
    :param ext_list: optional whitelist of extension class names.
    """
    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))
        # Now, consider each file in turn, only considering .py and .pyc files
        for fname in filenames:
            root, ext = os.path.splitext(fname)
            # Skip __init__ and anything that's not .py and .pyc
            if ((ext not in ('.py', '.pyc')) or root == '__init__' or
                fname in FILES_TO_SKIP):
                continue
            # If .pyc and .py both exist, skip .pyc
            if ext == '.pyc' and ((root + '.py') in filenames):
                continue
            # Try loading it
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" %
                         (package, relpkg, root, classname))
            if ext_list is not None and classname not in ext_list:
                logger.debug("Skipping extension: %s", classpath)
                continue
            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                logger.warning('Failed to load extension %(classpath)s: '
                               '%(exc)s',
                               {'classpath': classpath, 'exc': exc})
        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue
            # If it has extension(), delegate...
            ext_name = ("%s%s.%s.extension" %
                        (package, relpkg, dname))
            try:
                ext = importutils.import_class(ext_name)
            except ImportError:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    ext(ext_mgr)
                except Exception as exc:
                    logger.warning('Failed to load extension '
                                   '%(ext_name)s: %(exc)s',
                                   {'ext_name': ext_name, 'exc': exc})
        # Update the list of directories we'll explore...
        # (mutating dirnames in place prunes os.walk's recursion)
        dirnames[:] = subdirs
def extension_is_loaded(alias):
    """Ensure an extension is loaded and return a HTTP 404 if not.

    This is intended to be used on action APIs added by extensions. Other
    methods can use conditional configuration of the router.

    :param alias: alias of the extension that must be loaded for the
                  decorated method to be reachable.
    """
    def wrapper(func):
        @functools.wraps(func)
        def inner(self, *args, **kwargs):
            # BUGFIX: the check was inverted — it raised 404 when the
            # extension WAS loaded. Reject only when it is NOT loaded,
            # matching the docstring.
            if not self.ext_mgr.is_loaded(alias):
                raise webob.exc.HTTPNotFound()
            return func(self, *args, **kwargs)
        return inner
    return wrapper
def extension_authorizer(api_name, extension_name):
    """Return an authorize() callable scoped to one API extension.

    The returned callable enforces the
    '<api_name>_extension:<extension_name>[:<action>]' policy rule,
    defaulting the target to the caller's own project and user.
    """
    def authorize(context, target=None, action=None):
        if target is None:
            # Default target: the caller acts on its own resources.
            target = {'project_id': context.project_id,
                      'user_id': context.user_id}
        if action is None:
            act = '%s_extension:%s' % (api_name, extension_name)
        else:
            act = '%s_extension:%s:%s' % (api_name, extension_name, action)
        # Raises PolicyNotAuthorized when the rule denies access.
        cinder.policy.authorize(context, act, target)
    return authorize
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/microversions.py 0000664 0000000 0000000 00000011640 15131732575 0023663 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""API Microversion definitions.
All new microversions should have a constant added here to be used throughout
the code instead of the specific version number. Until patches land, it's
common to end up with merge conflicts with other microversion changes. Merge
conflicts will be easier to handle via the microversion constants defined here
as the version number will only need to be changed in a single location.
Actual version numbers should be used:
* In this file
* In cinder/api/openstack/rest_api_version_history.rst
* In cinder/api/openstack/api_version_request.py
* In release notes describing the new functionality
* In updates to api-ref
Nearly all microversion changes should include changes to all of those
locations. Make sure to add relevant documentation, and make sure that
documentation includes the final version number used.
"""
from cinder.api.openstack import api_version_request as api_version
from cinder import exception
# Add new constants here for each new microversion.
BASE_VERSION = '3.0'
UPLOAD_IMAGE_PARAMS = '3.1'
VOLUME_LIST_BOOTABLE = '3.2'
MESSAGES = '3.3'
VOLUME_LIST_GLANCE_METADATA = '3.4'
MESSAGES_PAGINATION = '3.5'
CG_UPDATE_BLANK_PROPERTIES = '3.6'
CLUSTER_SUPPORT = '3.7'
MANAGE_EXISTING_LIST = '3.8'
BACKUP_UPDATE = '3.9'
VOLUME_LIST_GROUP = '3.10'
GROUP_TYPE = '3.11'
VOLUME_SUMMARY = '3.12'
GROUP_VOLUME = '3.13'
GROUP_SNAPSHOTS = '3.14'
ETAGS = '3.15'
VOLUME_MIGRATE_CLUSTER = '3.16'
MANAGE_EXISTING_CLUSTER = '3.17'
BACKUP_PROJECT = '3.18'
GROUP_SNAPSHOT_RESET_STATUS = '3.19'
GROUP_VOLUME_RESET_STATUS = '3.20'
VOLUME_DETAIL_PROVIDER_ID = '3.21'
SNAPSHOT_LIST_METADATA_FILTER = '3.22'
VOLUME_DELETE_FORCE = '3.23'
WORKERS_CLEANUP = '3.24'
GROUP_VOLUME_LIST = '3.25'
REPLICATION_CLUSTER = '3.26'
NEW_ATTACH = '3.27'
POOL_FILTER = '3.28'
GROUP_SNAPSHOT_PAGINATION = '3.29'
SNAPSHOT_SORT = '3.30'
RESOURCE_FILTER = '3.31'
LOG_LEVEL = '3.32'
RESOURCE_FILTER_CONFIG = '3.33'
LIKE_FILTER = '3.34'
POOL_TYPE_FILTER = '3.35'
VOLUME_SUMMARY_METADATA = '3.36'
BACKUP_SORT_NAME = '3.37'
GROUP_REPLICATION = '3.38'
LIMITS_ADMIN_FILTER = '3.39'
VOLUME_REVERT = '3.40'
SNAPSHOT_LIST_USER_ID = '3.41'
VOLUME_EXTEND_INUSE = '3.42'
BACKUP_METADATA = '3.43'
NEW_ATTACH_COMPLETION = '3.44'
SUPPORT_COUNT_INFO = '3.45'
SUPPORT_NOVA_IMAGE = '3.46'
VOLUME_CREATE_FROM_BACKUP = '3.47'
VOLUME_SHARED_TARGETS_AND_SERVICE_FIELDS = '3.48'
BACKEND_STATE_REPORT = '3.49'
MULTIATTACH_VOLUMES = '3.50'
BACKUP_AZ = '3.51'
SUPPORT_VOLUME_TYPE_FILTER = '3.52'
SUPPORT_VOLUME_SCHEMA_CHANGES = '3.53'
ATTACHMENT_CREATE_MODE_ARG = '3.54'
TRANSFER_WITH_SNAPSHOTS = '3.55'
BACKUP_PROJECT_USER_ID = '3.56'
TRANSFER_WITH_HISTORY = '3.57'
GROUP_GROUPSNAPSHOT_PROJECT_ID = '3.58'
SUPPORT_TRANSFER_PAGINATION = '3.59'
VOLUME_TIME_COMPARISON_FILTER = '3.60'
VOLUME_CLUSTER_NAME = '3.61'
DEFAULT_TYPE_OVERRIDES = '3.62'
VOLUME_TYPE_ID_IN_VOLUME_DETAIL = '3.63'
ENCRYPTION_KEY_ID_IN_DETAILS = '3.64'
USE_QUOTA = '3.65'
SNAPSHOT_IN_USE = '3.66'
PROJECT_ID_OPTIONAL_IN_URL = '3.67'
SUPPORT_REIMAGE_VOLUME = '3.68'
SHARED_TARGETS_TRISTATE = '3.69'
TRANSFER_ENCRYPTED_VOLUME = '3.70'
EXTEND_VOLUME_COMPLETION = '3.71'
def get_mv_header(version):
    """Gets a formatted HTTP microversion header.

    :param version: The microversion needed.
    :return: A dict holding the OpenStack-API-Version header with the
             requested version value.
    """
    return {'OpenStack-API-Version': 'volume %s' % version}
def get_api_version(version):
    """Gets a ``APIVersionRequest`` instance.

    :param version: The microversion needed, e.g. '3.50'.
    :return: The ``APIVersionRequest`` instance.
    """
    # Thin convenience wrapper over the api_version_request module.
    return api_version.APIVersionRequest(version)
def get_prior_version(version):
    """Gets the microversion before the given version.

    Mostly useful for testing boundaries. This gets the microversion
    defined just prior to the given version; '3.0' is its own prior.

    :param version: The version of interest, as a '3.<minor>' string.
    :return: The version just prior to the given version.
    :raises exception.InvalidInput: for anything not shaped '3.<minor>'.
    """
    major_minor = version.split('.')
    if len(major_minor) != 2 or major_minor[0] != '3':
        raise exception.InvalidInput(reason='Version %s is not a valid '
                                     'microversion format.' % version)
    # Clamp at zero so the base microversion maps to itself.
    prior_minor = max(int(major_minor[1]) - 1, 0)
    return '%s.%s' % (major_minor[0], prior_minor)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/middleware/ 0000775 0000000 0000000 00000000000 15131732575 0022522 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/middleware/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0024621 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/middleware/auth.py 0000664 0000000 0000000 00000014003 15131732575 0024033 0 ustar 00root root 0000000 0000000 # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
from http import HTTPStatus
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder import context
from cinder.i18n import _
from cinder.wsgi import common as base_wsgi
use_forwarded_for_opt = cfg.BoolOpt(
'use_forwarded_for',
default=False,
help='Treat X-Forwarded-For as the canonical remote address. '
'Only enable this if you have a sanitizing proxy.')
CONF = cfg.CONF
CONF.register_opt(use_forwarded_for_opt)
LOG = logging.getLogger(__name__)
def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy.

    Picks the pipeline named after CONF.auth_strategy (preferring the
    '<strategy>_nolimit' variant when rate limiting is disabled), then
    wraps the app in its filters, outermost first.
    """
    pipeline = local_conf[CONF.auth_strategy]
    if not CONF.api_rate_limit:
        limit_name = CONF.auth_strategy + '_nolimit'
        pipeline = local_conf.get(limit_name, pipeline)
    names = pipeline.split()
    # All names but the last are filters; the last is the app itself.
    filter_factories = [loader.get_filter(name) for name in names[:-1]]
    app = loader.get_app(names[-1])
    for factory in reversed(filter_factories):
        app = factory(app)
    return app
def _set_request_context(req, **kwargs):
    """Sets request context based on parameters and request.

    Builds a cinder RequestContext from the WSGI environ plus any
    overrides in ``kwargs``, stores it under 'cinder.context' in
    ``req.environ`` and returns it.

    :raises webob.exc.HTTPInternalServerError: when the
        X_SERVICE_CATALOG header is present but not valid JSON.
    """
    remote_address = getattr(req, 'remote_addr', '127.0.0.1')
    service_catalog = None
    if req.headers.get('X_SERVICE_CATALOG') is not None:
        try:
            catalog_header = req.headers.get('X_SERVICE_CATALOG')
            service_catalog = jsonutils.loads(catalog_header)
        except ValueError:
            raise webob.exc.HTTPInternalServerError(
                explanation=_('Invalid service catalog json.'))
    if CONF.use_forwarded_for:
        # Trust the sanitizing proxy's view of the client address.
        remote_address = req.headers.get('X-Forwarded-For', remote_address)
    # Explicit kwargs from the caller win over header-derived values.
    kwargs.setdefault('remote_address', remote_address)
    kwargs.setdefault('service_catalog', service_catalog)
    # Preserve the timestamp set by the RequestId middleware
    kwargs['timestamp'] = getattr(req.environ.get('cinder.context'),
                                  'timestamp',
                                  None)
    # request ID and global ID are present in the environment req.environ
    ctx = context.RequestContext.from_environ(req.environ, **kwargs)
    req.environ['cinder.context'] = ctx
    return ctx
class InjectContext(base_wsgi.Middleware):
    """Add a 'cinder.context' to WSGI environ."""

    def __init__(self, context, *args, **kwargs):
        # The same fixed context object is injected into every request.
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, req):
        req.environ['cinder.context'] = self.context
        return self.application
class CinderKeystoneContext(base_wsgi.Middleware):
    """Make a request context from keystone headers."""

    # Mapping of WSGI environ keys (set by keystonemiddleware) to the
    # RequestContext keyword arguments they override when present.
    ENV_OVERWRITES = {
        'X_PROJECT_DOMAIN_ID': 'project_domain_id',
        'X_PROJECT_DOMAIN_NAME': 'project_domain_name',
        'X_USER_DOMAIN_ID': 'user_domain_id',
        'X_USER_DOMAIN_NAME': 'user_domain_name',
    }

    @webob.dec.wsgify(RequestClass=base_wsgi.Request)
    def __call__(self, req):
        params = {'project_name': req.headers.get('X_TENANT_NAME')}
        for env_name, param_name in self.ENV_OVERWRITES.items():
            if req.environ.get(env_name):
                params[param_name] = req.environ[env_name]
        ctx = _set_request_context(req, **params)
        if ctx.user_id is None:
            # No authenticated user could be derived: reject with 401.
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()
        return self.application
class NoAuthMiddlewareBase(base_wsgi.Middleware):
    """Return a fake token if one isn't specified."""

    def base_call(self, req, project_id_in_path=False):
        """Handle one request, fabricating authentication when needed.

        :param req: the incoming webob request
        :param project_id_in_path: when True, append the project id to
            the management URL returned to the client
        """
        token = req.headers.get('X-Auth-Token')
        if token is None:
            # No token supplied: answer directly with a synthetic token
            # instead of passing the request down the pipeline.
            user_id = req.headers.get('X-Auth-User', 'admin')
            project_id = req.headers.get('X-Auth-Project-Id', 'admin')
            os_url = req.url.rstrip('/')
            if project_id_in_path:
                os_url = os.path.join(os_url, project_id)
            res = webob.Response()
            # NOTE(vish): This is expecting and returning Auth(1.1), whereas
            #             keystone uses 2.0 auth. We should probably allow
            #             2.0 auth here as well.
            res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
            res.headers['X-Server-Management-Url'] = os_url
            res.content_type = 'text/plain'
            res.status_int = HTTPStatus.NO_CONTENT
            return res
        # Token format is "<user>[:<project>]"; a missing project part
        # defaults to the user id.
        user_id, _sep, project_id = token.partition(':')
        _set_request_context(req, user_id=user_id,
                             project_id=project_id or user_id,
                             is_admin=True)
        return self.application
class NoAuthMiddleware(NoAuthMiddlewareBase):
    """Return a fake token if one isn't specified.

    Does not include the project_id in the fake management URL
    (calls base_call() with the default project_id_in_path=False).
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        return self.base_call(req)
class NoAuthMiddlewareIncludeProjectID(NoAuthMiddlewareBase):
    """Return a fake token if one isn't specified.

    Appends the project_id to the fake management URL
    (calls base_call() with project_id_in_path=True).
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        return self.base_call(req, project_id_in_path=True)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/middleware/fault.py 0000664 0000000 0000000 00000006722 15131732575 0024216 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
import webob.dec
import webob.exc
from cinder.api import api_utils
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _
from cinder.wsgi import common as base_wsgi
LOG = logging.getLogger(__name__)
class FaultWrapper(base_wsgi.Middleware):
    """Calls down the middleware stack, making exceptions into faults."""

    # Lazily-populated cache: HTTP status code -> webob HTTPError subclass.
    _status_to_type = {}

    @staticmethod
    def status_to_type(status):
        """Return an instance of the webob error class for *status*."""
        mapping = FaultWrapper._status_to_type
        if not mapping:
            # Populate the cache on first use by walking every subclass
            # of webob.exc.HTTPError.
            for clazz in api_utils.walk_class_hierarchy(webob.exc.HTTPError):
                mapping[clazz.code] = clazz
        return mapping.get(status, webob.exc.HTTPInternalServerError)()

    def _error(self, inner, req):
        """Convert the exception *inner* into a wsgi.Fault response."""
        if isinstance(inner, UnicodeDecodeError):
            msg = _("Error decoding your request. Either the URL or the "
                    "request body contained characters that could not be "
                    "decoded by Cinder.")
            return wsgi.Fault(webob.exc.HTTPBadRequest(explanation=msg))

        if not isinstance(inner, exception.QuotaError):
            # Quota errors are routine; everything else gets a traceback.
            LOG.exception("Caught error: %(type)s %(error)s",
                          {'type': type(inner), 'error': inner})

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', HTTPStatus.INTERNAL_SERVER_ERROR)
        if status is None:
            status = HTTPStatus.INTERNAL_SERVER_ERROR

        LOG.info("%(url)s returned with HTTP %(status)s",
                 dict(url=req.url, status=status))

        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            if isinstance(inner, exception.CinderException):
                msg = inner.msg
            else:
                msg = str(inner)
            outer.explanation = _('%(exception)s: %(explanation)s') % {
                'exception': inner.__class__.__name__,
                'explanation': msg,
            }
        return wsgi.Fault(outer)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as err:
            return self._error(err, req)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/middleware/rate_limit.py 0000664 0000000 0000000 00000031567 15131732575 0025241 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from http import client as http_client
import math
import re
import time
from oslo_serialization import jsonutils
from oslo_utils import importutils
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder.i18n import _
from cinder.wsgi import common as base_wsgi
# Prefix identifying per-user limit settings passed to Limiter() as
# keyword arguments (e.g. "limits.<username>").
LIMITS_PREFIX = "limits."

# Convenience constants for the limits dictionary passed to Limiter().
# Each value is the length of the enforcement window in seconds.
PER_SECOND = 1
PER_MINUTE = 60
PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
class Limit(object):
    """Stores information about a limit for HTTP requests."""

    # Bucket capacity (in seconds) -> human-readable unit name.
    UNITS = {
        1: "SECOND",
        60: "MINUTE",
        60 * 60: "HOUR",
        60 * 60 * 24: "DAY",
    }

    # Reverse mapping: unit name -> capacity in seconds.
    UNIT_MAP = {v: k for k, v in UNITS.items()}

    def __init__(self, verb, uri, regex, value, unit):
        """Initialize a new `Limit`.

        @param verb: HTTP verb (POST, PUT, etc.)
        @param uri: Human-readable URI
        @param regex: Regular expression format for this limit
        @param value: Integer number of requests which can be made
        @param unit: Unit of measure for the value parameter
        """
        self.verb = verb
        self.uri = uri
        self.regex = regex
        self.value = int(value)
        self.unit = unit
        self.unit_string = self.display_unit().lower()
        self.remaining = int(value)

        if value <= 0:
            raise ValueError("Limit value must be > 0")

        self.last_request = None
        self.next_request = None

        # Leaky-bucket state: the bucket holds `unit` seconds worth of
        # water and each request pours in capacity/value units.
        self.water_level = 0
        self.capacity = self.unit
        self.request_value = float(self.capacity) / float(self.value)
        self.error_message = _(
            "Only %(value)s %(verb)s request(s) can be "
            "made to %(uri)s every %(unit_string)s."
        ) % {
            'value': self.value,
            'verb': self.verb,
            'uri': self.uri,
            'unit_string': self.unit_string,
        }

    def __call__(self, verb, url):
        """Record a hit against this limit for a relevant request.

        @param verb: string http verb (POST, GET, etc.)
        @param url: string URL
        @return: the delay in seconds when the request is over the limit,
                 None otherwise (in which case the counters are updated).
        """
        if verb != self.verb or re.match(self.regex, url) is None:
            # This limit does not apply to the request at all.
            return

        now = self._get_time()
        if self.last_request is None:
            self.last_request = now

        # Let the bucket leak in proportion to the elapsed time, then
        # pour in this request's worth of water.
        self.water_level = max(self.water_level - (now - self.last_request),
                               0)
        self.water_level += self.request_value

        overflow = self.water_level - self.capacity
        self.last_request = now

        if overflow > 0:
            # Bucket overflowed: undo this request and report the delay
            # until it would fit again.
            self.water_level -= self.request_value
            self.next_request = now + overflow
            return overflow

        self.remaining = math.floor(
            ((self.capacity - self.water_level) / self.capacity) * self.value)
        self.next_request = now

    def _get_time(self):
        """Retrieve the current time. Broken out for testability."""
        return time.time()

    def display_unit(self):
        """Display the string name of the unit."""
        return self.UNITS.get(self.unit, "UNKNOWN")

    def display(self):
        """Return a useful representation of this class."""
        reset = self.next_request or self._get_time()
        return {
            "verb": self.verb,
            "URI": self.uri,
            "regex": self.regex,
            "value": self.value,
            "remaining": int(self.remaining),
            "unit": self.display_unit(),
            "resetTime": int(reset),
        }
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
# These defaults apply only when no explicit limits are supplied to the
# rate-limiting middleware or Limiter.
DEFAULT_LIMITS = [
    Limit("POST", "*", ".*", 10, PER_MINUTE),
    Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
    Limit("PUT", "*", ".*", 10, PER_MINUTE),
    Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
    Limit("DELETE", "*", ".*", 100, PER_MINUTE),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
    """Rate-limits requests passing through this middleware.

    All limit information is stored in memory for this implementation.
    """

    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """Initialize class, wrap WSGI app, and set up given limits.

        :param application: WSGI application to wrap
        :param limits: String describing limits
        :param limiter: String identifying class for representing limits

        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)

        # Resolve the limiter implementation; default to the in-memory
        # Limiter defined in this module.
        if limiter is None:
            limiter_cls = Limiter
        else:
            limiter_cls = importutils.import_class(limiter)

        # Parse the limits, if any were provided, falling back to the
        # module defaults when nothing usable was supplied.
        parsed = None
        if limits is not None:
            parsed = limiter_cls.parse_limits(limits)
        self._limiter = limiter_cls(parsed or DEFAULT_LIMITS, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Represents a single call through this middleware.

        We should record the request if we have a limit relevant to it.
        If no limit is relevant to the request, ignore it. If the request
        should be rate limited, return a fault telling the user they are
        over the limit and need to retry later.
        """
        context = req.environ.get("cinder.context")
        username = context.user_id if context else None

        delay, error = self._limiter.check_for_delay(req.method, req.url,
                                                     username)
        if delay:
            msg = _("This request was rate-limited.")
            return wsgi.OverLimitFault(msg, error, time.time() + delay)

        req.environ["cinder.limits"] = self._limiter.get_limits(username)
        return self.application
class Limiter(object):
    """Rate-limit checking class which handles limits in memory."""

    def __init__(self, limits, **kwargs):
        """Initialize the new `Limiter`.

        @param limits: List of `Limit` objects
        """
        self.limits = copy.deepcopy(limits)
        # Each user lazily gets a private copy of the default limits.
        self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))

        # Pick up any per-user limit information, passed in as keyword
        # arguments of the form limits.<username>=<limits string>.
        for key, value in kwargs.items():
            if key.startswith(LIMITS_PREFIX):
                username = key[len(LIMITS_PREFIX):]
                self.levels[username] = self.parse_limits(value)

    def get_limits(self, username=None):
        """Return the limits for a given user."""
        return [limit.display() for limit in self.levels[username]]

    def check_for_delay(self, verb, url, username=None):
        """Check the given verb/url/username triplet for a limit.

        @return: Tuple of delay (in seconds) and error message, or
                 (None, None) when the request is within the limits.
        """
        delays = [(delay, limit.error_message)
                  for limit in self.levels[username]
                  if (delay := limit(verb, url))]
        if delays:
            # Report the shortest applicable delay.
            return min(delays)
        return None, None

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor. We
    # put this in the class so that subclasses can override the
    # default limit parsing.
    @staticmethod
    def parse_limits(limits):
        """Convert a string into a list of Limit instances.

        This implementation expects a semicolon-separated sequence of
        parenthesized groups, where each group contains a
        comma-separated sequence consisting of HTTP method,
        user-readable URI, a URI reg-exp, an integer number of
        requests which can be made, and a unit of measure. Valid
        values for the latter are "SECOND", "MINUTE", "HOUR", and
        "DAY".

        @return: List of Limit instances.
        """
        # Handle empty limit strings
        limits = limits.strip()
        if not limits:
            return []

        result = []
        for group in limits.split(';'):
            group = group.strip()
            if not (group.startswith('(') and group.endswith(')')):
                raise ValueError(
                    "Limit rules must be surrounded by " "parentheses"
                )

            # Extract and validate the five Limit arguments
            args = [a.strip() for a in group[1:-1].split(',')]
            if len(args) != 5:
                raise ValueError(
                    "Limit rules must contain the following "
                    "arguments: verb, uri, regex, value, unit"
                )
            verb, uri, regex, value, unit = args

            verb = verb.upper()
            # Raises ValueError if the value is not an integer
            value = int(value)
            unit = unit.upper()
            if unit not in Limit.UNIT_MAP:
                raise ValueError("Invalid units specified")
            unit = Limit.UNIT_MAP[unit]

            result.append(Limit(verb, uri, regex, value, unit))
        return result
class WsgiLimiter(object):
    """Rate-limit checking from a WSGI application.

    Uses an in-memory `Limiter`.

    To use, POST ``/`` with JSON data such as::

        {
            "verb" : GET,
            "path" : "/servers"
        }

    and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
    header containing the number of seconds to wait before the action would
    succeed.
    """

    def __init__(self, limits=None):
        """Initialize the new `WsgiLimiter`.

        @param limits: List of `Limit` objects
        """
        self._limiter = Limiter(limits or DEFAULT_LIMITS)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, request):
        """Handles a call to this application.

        Returns 204 if the request is acceptable to the limiter, else a 403
        is returned with a relevant header indicating when the request
        *will* succeed.
        """
        if request.method != "POST":
            raise webob.exc.HTTPMethodNotAllowed()

        try:
            info = dict(jsonutils.loads(request.body))
        except ValueError:
            raise webob.exc.HTTPBadRequest()

        # The username is the first path segment of the POSTed URL.
        username = request.path_info_pop()
        delay, error = self._limiter.check_for_delay(
            info.get("verb"), info.get("path"), username)

        if not delay:
            return webob.exc.HTTPNoContent()
        return webob.exc.HTTPForbidden(
            headers={"X-Wait-Seconds": "%.2f" % delay}, explanation=error)
class WsgiLimiterProxy(object):
    """Rate-limit requests based on answers from a remote source."""

    def __init__(self, limiter_address):
        """Initialize the new `WsgiLimiterProxy`.

        @param limiter_address: IP/port combination of where to request limit
        """
        self.limiter_address = limiter_address

    def check_for_delay(self, verb, path, username=None):
        """Ask the remote limiter whether this request must be delayed.

        @return: (None, None) on a 2xx answer, otherwise the value of the
                 X-Wait-Seconds header and the response body (or None).
        """
        body = jsonutils.dump_as_bytes({"verb": verb, "path": path})
        headers = {"Content-Type": "application/json"}

        conn = http_client.HTTPConnection(self.limiter_address)
        target = "/%s" % (username) if username else "/"
        conn.request("POST", target, body, headers)

        resp = conn.getresponse()
        if 200 <= resp.status < 300:
            return None, None
        return resp.getheader("X-Wait-Seconds"), resp.read() or None

    # Note: This method gets called before the class is instantiated,
    # so this must be either a static method or a class method. It is
    # used to develop a list of limits to feed to the constructor.
    # This implementation returns an empty list, since all limit
    # decisions are made by a remote server.
    @staticmethod
    def parse_limits(limits):
        """Ignore a limits string--simply doesn't apply for the limit proxy.

        @return: Empty list.
        """
        return []
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/middleware/request_id.py 0000664 0000000 0000000 00000006042 15131732575 0025242 0 ustar 00root root 0000000 0000000 # Copyright 2022 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_middleware import request_id as oslo_req_id
from oslo_utils import timeutils
from cinder import context as cinder_context
LOG = logging.getLogger(__name__)
class RequestId(oslo_req_id.RequestId):
    """Request-id middleware that also installs a placeholder context."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The parent's __call__ generates the request id and then invokes
        # self.application directly, so slide our context-setting wrapper
        # in between the two.
        self._application = self.application
        self.application = self._context_setter

    def _context_setter(self, environ, *args, **kwargs):
        """Wrapper to set a temporary context.

        It is necessary to replace the previous request's context, but at
        this point when we are generating the new request ID we don't have
        the keystone info, so use a placeholder with the information we do
        have:

        - global_request_id => Extracted from the headers by the parent
          class
        - request_id => Generated by the parent class __call__ method
        - timestamp => The moment Cinder API starts processing the request

        This middleware needs to be the first in ALL the pipelines for
        things to work as expected, otherwise we'll have the following
        issues:

        - Logs from other filters reuse a context from a previous request,
          presenting the wrong request id and project and user info, and
          then after the request passes the auth filter the request id will
          change to the right one. We'll see this when enabling debug mode
          in the keystonemiddleware module.
        - Requests that don't require authorization (/ and /healthcheck)
          won't return a request ID in the headers.
        """
        # Replace the previous request's context with what we know now.
        # Instantiating the request context also refreshes the
        # oslo_context thread-local store, so only environ needs an
        # explicit update here.
        environ['cinder.context'] = cinder_context.RequestContext(
            request_id=environ[oslo_req_id.ENV_REQUEST_ID],
            global_request_id=environ.get(oslo_req_id.GLOBAL_REQ_ID),
            timestamp=timeutils.utcnow(),
        )
        # Timestamped marker for the start of pipeline processing.
        LOG.debug('RequestId filter calling following filter/app')
        return self._application(environ, *args, **kwargs)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/openstack/ 0000775 0000000 0000000 00000000000 15131732575 0022374 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/openstack/__init__.py 0000664 0000000 0000000 00000002111 15131732575 0024500 0 ustar 00root root 0000000 0000000 # Copyright (c) 2013 OpenStack Foundation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from oslo_config import cfg
# Options controlling how project ids embedded in request URLs are
# validated by the API routing layer.
openstack_api_opts = [
    cfg.StrOpt('project_id_regex',
               default=r"[0-9a-f\-]+",
               help=r'The validation regex for project_ids used in urls. '
                    r'This defaults to [0-9a-f\\-]+ if not set, '
                    r'which matches normal uuids created by keystone.'),
]

CONF = cfg.CONF
CONF.register_opts(openstack_api_opts)
# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from cinder.api.openstack import versioned_method
from cinder import exception
from cinder.i18n import _
from cinder import utils
# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
# change is made which affects the API as whole. That is, something
# that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
# semantic changes which may not affect the input or output formats or
# even originate in the API code layer. We are not distinguishing
# between backwards compatible and backwards incompatible changes in
# the versioning system. It must be made clear in the documentation as
# to what is a backwards compatible change and what is a backwards
# incompatible one.
#
# You must update the API version history string below with a one or
# two line description as well as update rest_api_version_history.rst
REST_API_VERSION_HISTORY = """
REST API Version History:
* 3.0 - Includes all V2 APIs and extensions. V1 API is still supported.
* 3.0 - Versions API updated to reflect beginning of microversions epoch.
* 3.1 - Adds visibility and protected to _volume_upload_image parameters.
* 3.2 - Bootable filters in volume GET call no longer treats all values
passed to it as true.
* 3.3 - Add user messages APIs.
* 3.4 - Adds glance_metadata filter to list/detail volumes in _get_volumes.
* 3.5 - Add pagination support to messages API.
* 3.6 - Allows to set empty description and empty name for consistency
group in consisgroup-update operation.
* 3.7 - Add cluster API and cluster_name field to service list API
* 3.8 - Adds resources from volume_manage and snapshot_manage extensions.
* 3.9 - Add backup update interface.
* 3.10 - Add group_id filter to list/detail volumes in _get_volumes.
* 3.11 - Add group types and group specs API.
* 3.12 - Add volumes summary API.
* 3.13 - Add generic volume groups API.
* 3.14 - Add group snapshot and create group from src APIs.
* 3.15 - Inject the response's `Etag` header to avoid the lost update
problem with volume metadata.
* 3.16 - Migrate volume now supports cluster
* 3.17 - Getting manageable volumes and snapshots now accepts cluster.
* 3.18 - Add backup project attribute.
* 3.19 - Add API reset status actions 'reset_status' to group snapshot.
* 3.20 - Add API reset status actions 'reset_status' to generic
volume group.
* 3.21 - Show provider_id in detailed view of a volume for admin.
* 3.22 - Add filtering based on metadata for snapshot listing.
* 3.23 - Allow passing force parameter to volume delete.
* 3.24 - Add workers/cleanup endpoint.
* 3.25 - Add ``volumes`` field to group list/detail and group show.
* 3.26 - Add failover action and cluster listings accept new filters and
return new data.
* 3.27 - Add attachment API
* 3.28 - Add filters support to get_pools
* 3.29 - Add filter, sorter and pagination support in group snapshot.
* 3.30 - Support sort snapshots with "name".
* 3.31 - Add support for configure resource query filters.
* 3.32 - Add set-log and get-log service actions.
* 3.33 - Add ``resource_filters`` API to retrieve configured
resource filters.
* 3.34 - Add like filter support in ``volume``, ``backup``, ``snapshot``,
``message``, ``attachment``, ``group`` and ``group-snapshot``
list APIs.
* 3.35 - Add ``volume-type`` filter to Get-Pools API.
* 3.36 - Add metadata to volumes/summary response body.
* 3.37 - Support sort backup by "name".
* 3.38 - Add replication group API (Tiramisu).
* 3.39 - Add ``project_id`` admin filters support to limits.
* 3.40 - Add volume revert to its latest snapshot support.
* 3.41 - Add ``user_id`` field to snapshot list/detail and snapshot show.
* 3.42 - Add ability to extend 'in-use' volume. User should be aware of the
whole environment before using this feature because it's dependent
on several external factors below:
1. nova-compute version - needs to be the latest for Pike.
2. only the libvirt compute driver supports this currently.
3. only iscsi and fibre channel volume types are supported
on the nova side currently.
Administrator can disable this ability by updating the
'volume:extend_attached_volume' policy rule. Extend in reserved
state is intentionally NOT allowed.
* 3.43 - Support backup CRUD with metadata.
* 3.44 - Add attachment-complete.
* 3.45 - Add ``count`` field to volume, backup and snapshot list and
detail APIs.
* 3.46 - Support create volume by Nova specific image (0 size image).
* 3.47 - Support create volume from backup.
* 3.48 - Add ``shared_targets`` and ``service_uuid`` fields to volume.
* 3.49 - Support report backend storage state in service list.
* 3.50 - Add multiattach capability
* 3.51 - Add support for cross AZ backups.
* 3.52 - ``RESKEY:availability_zones`` is a reserved spec key for AZ
volume type, and filter volume type by ``extra_specs`` is
supported now.
* 3.53 - Add schema validation support for request body using jsonschema
for V2/V3 volume APIs.
1. Modified create volume API to accept only parameters which are
documented in the api-ref otherwise it will return 400 error.
2. Update volume API expects user to pass at least one valid
parameter in the request body in order to update the volume.
Also, additional parameters will not be allowed.
* 3.54 - Add ``mode`` argument to attachment-create.
* 3.55 - Support transfer volume with snapshots
* 3.56 - Add ``user_id`` attribute to response body of list backup with
detail and show backup detail APIs.
* 3.57 - Add 'source_project_id', 'destination_project_id', 'accepted' to
transfer.
* 3.58 - Add ``project_id`` attribute to response body of list groups with
detail, list group snapshots with detail, show group detail and
show group snapshot detail APIs.
* 3.59 - Support volume transfer pagination.
* 3.60 - Support filtering on the "updated_at" and "created_at" fields with
time comparison operators for the volume summary list
("GET /v3/{project_id}/volumes") and volume detail list
("GET /v3/{project_id}/volumes/detail") requests.
* 3.61 - Add ``cluster_name`` attribute to response body of volume details
for admin.
* 3.62 - Default volume type overrides
* 3.63 - Include volume type ID in the volume details JSON response. Before
this microversion (MV), Cinder returns only the volume type name
in the volume details. This MV affects the volume detail list
("GET /v3/{project_id}/volumes/detail") and volume-show
("GET /v3/{project_id}/volumes/{volume_id}") calls.
* 3.64 - Include 'encryption_key_id' in volume and backup details
* 3.65 - Include 'consumes_quota' in volume and snapshot details
- Accept 'consumes_quota' filter in volume and snapshot list
operation.
* 3.66 - Allow snapshotting in-use volumes without force flag.
* 3.67 - API URLs no longer need to include a project_id parameter.
* 3.68 - Support re-image volume
* 3.69 - Allow null value for shared_targets
* 3.70 - Support encrypted volume transfers
* 3.71 - Support 'os-extend_volume_completion' volume action
"""
# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
_MIN_API_VERSION = "3.0"
_MAX_API_VERSION = "3.71"
# ISO 8601 timestamp of the latest microversion bump; presumably surfaced
# by the version discovery endpoint -- confirm against callers.
UPDATED = "2023-08-31T00:00:00Z"
# NOTE(cyeoh): min and max versions declared as functions so we can
# mock them for unittests. Do not use the constants directly anywhere
# else.
def min_api_version():
    """Return the minimum supported microversion as an APIVersionRequest."""
    return APIVersionRequest(version_string=_MIN_API_VERSION)
def max_api_version():
    """Return the maximum supported microversion as an APIVersionRequest."""
    return APIVersionRequest(version_string=_MAX_API_VERSION)
class APIVersionRequest(utils.ComparableMixin):
    """This class represents an API Version Request.

    This class includes convenience methods for manipulation
    and comparison of version numbers as needed to implement
    API microversions.
    """

    def __init__(self, version_string=None, experimental=False):
        """Create an API version request object.

        :param version_string: an "X.Y" version string; the major part
            must be a positive integer and neither part may have leading
            zeros. ``None`` creates a "null" request that evaluates as
            falsy.
        :param experimental: accepted for interface compatibility; not
            stored on the object.
        :raises exception.InvalidAPIVersionString: if version_string is
            given but does not match the expected format.
        """
        self._ver_major = None
        self._ver_minor = None

        if version_string is not None:
            # Major must be >= 1; minor may be 0 but may not have
            # leading zeros.
            match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
                             version_string)
            if match:
                self._ver_major = int(match.group(1))
                self._ver_minor = int(match.group(2))
            else:
                raise exception.InvalidAPIVersionString(version=version_string)

    def __str__(self):
        """Debug/Logging representation of object."""
        return ("API Version Request Major: %(major)s, Minor: %(minor)s"
                % {'major': self._ver_major, 'minor': self._ver_minor})

    def __bool__(self):
        # A "null" request (no version_string) is falsy. The major version
        # can never be 0 (regex requires >= 1), so checking the `or` result
        # against None is sufficient.
        return (self._ver_major or self._ver_minor) is not None

    # Python 2 compatibility alias.
    __nonzero__ = __bool__

    def _cmpkey(self):
        """Return the value used by ComparableMixin for rich comparisons."""
        return self._ver_major, self._ver_minor

    def matches_versioned_method(self, method):
        """Compares this version to that of a versioned method.

        :param method: a versioned_method.VersionedMethod instance
        :raises exception.InvalidParameterValue: if method is not a
            VersionedMethod
        :returns: boolean
        """
        if type(method) is not versioned_method.VersionedMethod:
            msg = _('An API version request must be compared '
                    'to a VersionedMethod object.')
            raise exception.InvalidParameterValue(err=msg)
        return self.matches(method.start_version,
                            method.end_version,
                            method.experimental)

    def matches(self, min_version, max_version=None, experimental=False):
        """Compares this version to the specified min/max range.

        Returns whether the version object represents a version
        greater than or equal to the minimum version and less than
        or equal to the maximum version.

        If min_version is null then there is no minimum limit.
        If max_version is null then there is no maximum limit.
        If self is null then raise ValueError.

        :param min_version: Minimum acceptable version.
        :param max_version: Maximum acceptable version.
        :param experimental: Whether to match experimental APIs.
        :returns: boolean
        """
        if not self:
            raise ValueError
        if isinstance(min_version, str):
            min_version = APIVersionRequest(version_string=min_version)
        if isinstance(max_version, str):
            max_version = APIVersionRequest(version_string=max_version)
        # "null" bounds (falsy APIVersionRequest objects) mean unbounded.
        if not min_version and not max_version:
            return True
        if not max_version:
            return min_version <= self
        if not min_version:
            return self <= max_version
        return min_version <= self <= max_version

    def get_string(self):
        """Returns a string representation of this object.

        If this method is used to create an APIVersionRequest,
        the resulting object will be an equivalent request.
        """
        if not self:
            raise ValueError
        return ("%(major)s.%(minor)s" %
                {'major': self._ver_major, 'minor': self._ver_minor})
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/openstack/rest_api_version_history.rst 0000664 0000000 0000000 00000040052 15131732575 0030263 0 ustar 00root root 0000000 0000000 REST API Version History
========================
This documents the changes made to the REST API with every
microversion change. The description for each version should be a
verbose one which has enough information to be suitable for use in
user documentation.
3.0 (Maximum in Mitaka)
-----------------------
The 3.0 Cinder API includes all v2 core APIs existing prior to
the introduction of microversions. The /v3 URL is used to call
3.0 APIs.
This is the initial version of the Cinder API which supports
microversions.
A user can specify a header in the API request::
   OpenStack-API-Version: volume <version>

where ``<version>`` is any valid api version for this API.
If no version is specified then the API will behave as if version 3.0
was requested.
The only API change in version 3.0 is versions, i.e.
GET http://localhost:8786/, which now returns information about
3.0 and later versions and their respective /v3 endpoints.
All other 3.0 APIs are functionally identical to version 2.0.
3.1
---
Added the parameters ``protected`` and ``visibility`` to
_volume_upload_image requests.
3.2
---
Change in return value of 'GET API request' for fetching cinder volume
list on the basis of 'bootable' status of volume as filter.
Before V3.2, 'GET API request' to fetch volume list returns non-bootable
volumes if bootable filter value is any of the false or False.
For any other value provided to this filter, it always returns
bootable volume list.
But in V3.2, this behavior is updated.
In V3.2, bootable volume list will be returned for any of the
'T/True/1/true' bootable filter values only.
Non-bootable volume list will be returned for any of 'F/False/0/false'
bootable filter values.
But for any other values passed for bootable filter, it will return
"Invalid input received: bootable={filter value}' error.
3.3
---
Added /messages API.
3.4
---
Added the filter parameters ``glance_metadata`` to
list/detail volumes requests.
3.5
---
Added pagination support to /messages API
3.6
---
Allowed to set empty description and empty name for consistency
group in consisgroup-update operation.
3.7
---
Added ``cluster_name`` field to service list/detail.
Added /clusters endpoint to list/show/update clusters.
Show endpoint requires the cluster name and optionally the binary as a URL
parameter (default is "cinder-volume"). Returns:
.. code-block:: json
{
"cluster": {
"created_at": "",
"disabled_reason": null,
"last_heartbeat": "",
"name": "cluster_name",
"num_down_hosts": 4,
"num_hosts": 2,
"state": "up",
"status": "enabled",
"updated_at": ""
}
}
Update endpoint allows enabling and disabling a cluster in a similar way to
service's update endpoint, but in the body we must specify the name and
optionally the binary ("cinder-volume" is the default) and the disabled
reason. Returns:
.. code-block:: json
{
"cluster": {
"name": "cluster_name",
"state": "up",
"status": "enabled",
"disabled_reason": null
}
}
Index and detail accept filtering by `name`, `binary`, `disabled`,
`num_hosts` , `num_down_hosts`, and up/down status (`is_up`) as URL
parameters.
Index endpoint returns:
.. code-block:: json
{
"clusters": [
{
"name": "cluster_name",
"state": "up",
"status": "enabled"
}
]
}
Detail endpoint returns:
.. code-block:: json
{
"clusters": [
{
"created_at": "",
"disabled_reason": null,
"last_heartbeat": "",
"name": "cluster_name",
"num_down_hosts": 4,
"num_hosts": 2,
"state": "up",
"status": "enabled",
"updated_at": ""
}
]
}
3.8
---
Adds the following resources that were previously in extensions:
- os-volume-manage => /v3/<project_id>/manageable_volumes
- os-snapshot-manage => /v3/<project_id>/manageable_snapshots
3.9
---
Added backup update interface to change name and description.
Returns:
.. code-block:: json
{
"backup": {
"id": "backup_id",
"name": "backup_name",
"links": "backup_link"
}
}
3.10
----
Added the filter parameters ``group_id`` to
list/detail volumes requests.
3.11
----
Added group types and group specs APIs.
3.12
----
Added volumes/summary API.
3.13
----
Added create/delete/update/list/show APIs for generic volume groups.
3.14
----
Added group snapshots and create group from src APIs.
3.15 (Maximum in Newton)
------------------------
Added injecting the response's `Etag` header to avoid the lost update
problem with volume metadata.
3.16
----
os-migrate_volume now accepts ``cluster`` parameter when we want to migrate a
volume to a cluster. If we pass the ``host`` parameter for a volume that is
in a cluster, the request will be sent to the cluster as if we had requested
that specific cluster. Only ``host`` or ``cluster`` can be provided.
Creating a managed volume also supports the cluster parameter.
3.17
----
os-snapshot-manage and os-volume-manage now support ``cluster`` parameter on
listings (summary and detailed). Both location parameters, ``cluster`` and
``host`` are exclusive and only one should be provided.
3.18
----
Added backup project attribute.
3.19
----
Added reset status actions 'reset_status' to group snapshot.
3.20
----
Added reset status actions 'reset_status' to generic volume group.
3.21
----
Show provider_id in detailed view of a volume for admin.
3.22
----
Added support to filter snapshot list based on metadata of snapshot.
3.23
----
Allow passing force parameter to volume delete.
3.24
----
New API endpoint /workers/cleanup allows triggering cleanup for cinder-volume
services. Meant for cleaning ongoing operations from failed nodes.
The cleanup will be performed by other services belonging to the same
cluster, so at least one of them must be up to be able to do the cleanup.
Cleanup cannot be triggered during a cloud upgrade.
If no arguments are provided cleanup will try to issue a clean message for
all nodes that are down, but we can restrict which nodes we want to be
cleaned using parameters ``service_id``, ``cluster_name``, ``host``,
``binary``, and ``disabled``.
Cleaning specific resources is also possible using ``resource_type`` and
``resource_id`` parameters.
We can even force cleanup on nodes that are up with ``is_up``, but that's
not recommended and should only used if you know what you are doing. For
example if you know a specific cinder-volume is down even though it's still
not being reported as down when listing the services and you know the cluster
has at least another service to do the cleanup.
API will return a dictionary with 2 lists, one with services that have been
issued a cleanup request (``cleaning`` key) and the other with services
that cannot be cleaned right now because there is no alternative service to
do the cleanup in that cluster (``unavailable`` key).
Data returned for each service element in these two lists consist of the
``id``, ``host``, ``binary``, and ``cluster_name``. These are not the
services that will be performing the cleanup, but the services that will be
cleaned up or couldn't be cleaned up.
3.25
----
Add ``volumes`` field to group list/detail and group show.
3.26
----
- New ``failover`` action equivalent to ``failover_host``, but accepting
``cluster`` parameter as well as the ``host`` cluster that
``failover_host`` accepts.
- ``freeze`` and ``thaw`` actions accept ``cluster`` parameter.
- Cluster listing accepts ``replication_status``, ``frozen`` and
``active_backend_id`` as filters, and returns additional fields for each
cluster: ``replication_status``, ``frozen``, ``active_backend_id``.
3.27 (Maximum in Ocata)
-----------------------
Added new attachment APIs. See the
`API reference <https://docs.openstack.org/api-ref/block-storage/v3/index.html#attachments-attachments>`__
for details.
3.28
----
Add filters support to get_pools
3.29
----
Add filter, sorter and pagination support in group snapshot.
3.30
----
Support sort snapshots with "name".
3.31
----
Add support for configure resource query filters.
3.32
----
Added ``set-log`` and ``get-log`` service actions.
3.33
----
Add ``resource_filters`` API to retrieve configured resource filters.
3.34
----
Add like filter support in ``volume``, ``backup``, ``snapshot``, ``message``,
``attachment``, ``group`` and ``group-snapshot`` list APIs.
3.35
----
Add ``volume-type`` filter to Get-Pools API.
3.36
----
Add metadata to volumes/summary response body.
3.37
----
Support sort backup by "name".
3.38
----
Added enable_replication/disable_replication/failover_replication/
list_replication_targets for replication groups (Tiramisu).
3.39
----
Add ``project_id`` admin filters support to limits.
3.40
----
Add volume revert to its latest snapshot support.
3.41
----
Add ``user_id`` field to snapshot list/detail and snapshot show.
3.42
----
Add ability to extend 'in-use' volume. User should be aware of the
whole environment before using this feature because it's dependent
on several external factors below:
1. nova-compute version - needs to be the latest for Pike.
2. only the libvirt compute driver supports this currently.
3. only iscsi and fibre channel volume types are supported on the
nova side currently.
Administrator can disable this ability by updating the
``volume:extend_attached_volume`` policy rule. Extend of a reserved
Volume is NOT allowed.
3.43 (Maximum in Pike)
----------------------
Support backup CRUD with metadata.
3.44
----
Support attachment completion. See the
`API reference <https://docs.openstack.org/api-ref/block-storage/v3/index.html#attachments-attachments>`__
for details.
3.45
----
Add ``count`` field to volume, backup and snapshot list and detail APIs.
3.46
----
Modify the behavior of the volume-create (``POST /v3/volumes``) call when
passing an ``imageRef`` in the request body. Prior to this microversion,
the image was simply downloaded and written to the volume. However, when
a volume is attached to a server, it is possible to use the Compute API
server ``createImage`` action to create an instance snapshot of the volume.
This is a zero-byte image in the Image Service that has a
``block_device_mapping`` image property whose value contains ``snapshot``
as the ``source_type`` and a ``snapshot_id`` reference to a volume snapshot
in the Block Storage service. From microversion 3.46 and later, when a
volume-create request is made referring to such an image, instead of using
the image to create a volume, the snapshot it references will be used.
.. note::
Due to changes to cinder to handle image-related CVEs, making a
volume-create call with an imageRef referring to a nova instance
snapshot specifying a microversion less than 3.46 may create a volume
in ``error`` status. This occurs when the ``disk_format`` property
of the image is something other than ``raw``, because for non-raw
formats, even an image containing no data will consist of more than
zero bytes, and thus the image is rejected as being of a different
format than is claimed.
3.47
----
Support create volume from backup.
3.48
----
Add ``shared_targets`` and ``service_uuid`` fields to volume.
3.49
----
Support report backend storage state in service list.
3.50 (Maximum in Queens)
------------------------
Services supporting this microversion are capable of volume multiattach.
This version does not need to be requested when creating the volume, but can
be used as a way to query if the capability exists in the Cinder service.
3.51
----
Add support for cross AZ backups.
3.52
----
``RESKEY:availability_zones`` is a reserved spec key for AZ volume type,
and filter volume type by ``extra_specs`` is supported now.
3.53
----
Schema validation support has been added using jsonschema for V2/V3
volume APIs.
- Create volume API
Before 3.53, create volume API used to accept any invalid parameters in the
request body like the ones below were passed by python-cinderclient.
1. user_id
2. project_id
3. status
4. attach_status
But in 3.53, this behavior is updated. If user passes any invalid
parameters to the API which are not documented in api-ref, then
it will raise badRequest error.
- Update volume API
Before 3.53, even if user doesn't pass any valid parameters in the request
body, the volume was updated.
But in 3.53, user will need to pass at least one valid parameter in the
request body otherwise it will return 400 error.
3.54
----
Add ``mode`` argument to attachment-create.
3.55 (Maximum in Rocky)
-----------------------
Support ability to transfer snapshots along with their parent volume.
3.56
----
Add ``user_id`` attribute to response body of list backup with detail and show
backup detail APIs.
3.57
----
Expanded volume transfer record details by adding ``source_project_id``,
``destination_project_id`` and ``accepted`` fields to ``transfer`` table and
related api (create/show/list detail transfer APIs) responses.
3.58
----
Add ``project_id`` attribute to response body of list groups with detail,
list group snapshots with detail, show group detail and show group snapshot
detail APIs.
3.59 (Maximum in Stein and Train)
---------------------------------
Support volume transfer pagination.
3.60 (Maximum in Ussuri)
------------------------
Users may apply time comparison filters to the volume summary list and volume
detail list requests by using the ``created_at`` or ``updated_at`` fields.
Time must be expressed in ISO 8601 format.
3.61
----
Add ``cluster_name`` attribute to response body of volume details for admin in
Active/Active HA mode.
3.62 (Maximum in Victoria)
--------------------------
Add support for set, get, and unset a default volume type for a specific
project. Setting this default overrides the configured default_volume_type
value.
3.63
----
Includes volume type ID in the volume-show and volume-detail-list JSON
responses. Before this microversion, Cinder returns only the volume type name
in the volume details.
3.64 (Maximum in Wallaby)
-------------------------
Include the ``encryption_key_id`` in volume and backup details when the
associated volume is encrypted.
3.65
----
Include a ``consumes_quota`` field in volume and snapshot details to indicate
whether the resource is consuming quota or not. Also, accept a
``consumes_quota`` filter, which takes a boolean value, in the volume and
snapshot list requests. (The default listing behavior is not to use this
filter.)
3.66 (Maximum in Xena)
----------------------
Volume snapshots of in-use volumes can be created without the 'force' flag.
Although the 'force' flag is now considered invalid when passed in a volume
snapshot request, for backward compatibility, the 'force' flag with a value
evaluating to True is silently ignored.
3.67
----
API URLs no longer need a "project_id" argument in them. For example, the API
route: ``https://$(controller)s/volume/v3/$(project_id)s/volumes`` is
equivalent to ``https://$(controller)s/volume/v3/volumes``. When interacting
with the cinder service as system or domain scoped users, a project_id should
not be specified in the API path.
3.68 (Maximum in Yoga)
----------------------
Support ability to re-image a volume with a specific image. Specify the
``os-reimage`` action in the request body.
3.69
----
Volume field ``shared_targets`` is a tristate boolean value now, with the
following meanings:
- ``true``: Do os-brick locking when host iSCSI initiator doesn't support
manual scans.
- ``false``: Never do locking.
- ``null``: Forced locking regardless of the iSCSI initiator.
3.70 (Maximum in Zed, 2023.1 and 2023.2)
----------------------------------------
Add the ability to transfer encrypted volumes and their snapshots. The feature
removes a prior restriction on transferring encrypted volumes. Otherwise, the
API request and response schema are unchanged.
3.71 (Maximum in 2024.1 and 2024.2)
-----------------------------------
Add the ``os-extend_volume_completion`` volume action, which Nova can use
to notify Cinder of success and error when handling a ``volume-extended``
external server event.
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/openstack/versioned_method.py 0000664 0000000 0000000 00000003171 15131732575 0026306 0 ustar 00root root 0000000 0000000 # Copyright 2014 IBM Corp.
# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import utils
class VersionedMethod(utils.ComparableMixin):

    def __init__(self, name, start_version, end_version, experimental, func):
        """Versioning information for a single method.

        Minimum and maximums are inclusive.

        :param name: Name of the method
        :param start_version: Minimum acceptable version
        :param end_version: Maximum acceptable version
        :param experimental: Whether the method belongs to an experimental
            part of the API
        :param func: Method to call
        """
        self.name = name
        self.start_version = start_version
        self.end_version = end_version
        self.experimental = experimental
        self.func = func

    def __str__(self):
        """Human-readable summary of the method and its version range."""
        return "Version Method %(name)s: min: %(start)s, max: %(end)s" % {
            'name': self.name,
            'start': self.start_version,
            'end': self.end_version,
        }

    def _cmpkey(self):
        """Return the value used by ComparableMixin for rich comparisons."""
        # Ordering considers only the minimum supported version.
        return self.start_version
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/openstack/wsgi.py 0000664 0000000 0000000 00000147223 15131732575 0023730 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import abc
import functools
from http import HTTPStatus
import inspect
import math
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import strutils
import routes
import webob
import webob.exc
from cinder.api.openstack import api_version_request as api_version
from cinder.api.openstack import versioned_method
from cinder import exception
from cinder import i18n
i18n.enable_lazy()
from cinder.i18n import _
from cinder import utils
from cinder.wsgi import common as wsgi
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.volume+json',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.volume+json': 'json',
'application/json': 'json',
}
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Name of header used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version'
VOLUME_SERVICE = 'volume'
class Request(webob.Request):
    """Add some OpenStack API-specific logic to the base webob.Request."""

    def __init__(self, *args, **kwargs):
        super(Request, self).__init__(*args, **kwargs)
        # Per-request cache of resources, keyed by a resource name and then
        # by each resource's id attribute (see cache_resource()).
        self._resource_cache = {}
        # Default to the "null" API version request; middleware is expected
        # to replace this via set_api_version_request() during routing.
        if not hasattr(self, 'api_version_request'):
            self.api_version_request = api_version.APIVersionRequest()

    def cache_resource(self, resource_to_cache, id_attribute='id', name=None):
        """Cache the given resource.

        Allow API methods to cache objects, such as results from a DB query,
        to be used by API extensions within the same API request.

        The resource_to_cache can be a list or an individual resource,
        but ultimately resources are cached individually using the given
        id_attribute.

        Different resources types might need to be cached during the same
        request, they can be cached using the name parameter. For example:

            Controller 1:
                request.cache_resource(db_volumes, 'volumes')
                request.cache_resource(db_volume_types, 'types')
            Controller 2:
                db_volumes = request.cached_resource('volumes')
                db_type_1 = request.cached_resource_by_id('1', 'types')

        If no name is given, a default name will be used for the resource.

        An instance of this class only lives for the lifetime of a
        single API request, so there's no need to implement full
        cache management.
        """
        if not isinstance(resource_to_cache, list):
            resource_to_cache = [resource_to_cache]
        if not name:
            # Fall back to the request path as the cache namespace.
            name = self.path
        cached_resources = self._resource_cache.setdefault(name, {})
        for resource in resource_to_cache:
            cached_resources[resource[id_attribute]] = resource

    def cached_resource(self, name=None):
        """Get the cached resources cached under the given resource name.

        Allow an API extension to get previously stored objects within
        the same API request.

        Note that the object data will be slightly stale.

        :returns: a dict of id_attribute to the resource from the cached
                  resources, an empty map if an empty collection was cached,
                  or None if nothing has been cached yet under this name
        """
        if not name:
            name = self.path
        if name not in self._resource_cache:
            # Nothing has been cached for this key yet
            return None
        return self._resource_cache[name]

    def cached_resource_by_id(self, resource_id, name=None):
        """Get a resource by ID cached under the given resource name.

        Allow an API extension to get a previously stored object
        within the same API request. This is basically a convenience method
        to lookup by ID on the dictionary of all cached resources.

        Note that the object data will be slightly stale.

        :returns: the cached resource or None if the item is not in the cache
        """
        resources = self.cached_resource(name)
        if not resources:
            # Nothing has been cached for this key yet
            return None
        return resources.get(resource_id)

    def cache_db_items(self, key, items, item_key='id'):
        """Get cached database items.

        Allow API methods to store objects from a DB query to be
        used by API extensions within the same API request.

        An instance of this class only lives for the lifetime of a
        single API request, so there's no need to implement full
        cache management.
        """
        self.cache_resource(items, item_key, key)

    def get_db_items(self, key):
        """Get database items.

        Allow an API extension to get previously stored objects within
        the same API request.

        Note that the object data will be slightly stale.
        """
        return self.cached_resource(key)

    def get_db_item(self, key, item_key):
        """Get database item.

        Allow an API extension to get a previously stored object
        within the same API request.

        Note that the object data will be slightly stale.
        """
        # NOTE(review): get_db_items() returns None when nothing has been
        # cached under 'key', which would make this raise AttributeError.
        # Callers are presumably expected to have cached items first.
        return self.get_db_items(key).get(item_key)

    def cache_db_volumes(self, volumes):
        # NOTE(mgagne) Cache it twice for backward compatibility reasons
        self.cache_db_items('volumes', volumes, 'id')
        self.cache_db_items(self.path, volumes, 'id')

    def cache_db_volume(self, volume):
        # NOTE(mgagne) Cache it twice for backward compatibility reasons
        self.cache_db_items('volumes', [volume], 'id')
        self.cache_db_items(self.path, [volume], 'id')

    def get_db_volumes(self):
        # Check the legacy 'volumes' key first, then the path-based one.
        return (self.get_db_items('volumes') or
                self.get_db_items(self.path))

    def get_db_volume(self, volume_id):
        # Check the legacy 'volumes' key first, then the path-based one.
        return (self.get_db_item('volumes', volume_id) or
                self.get_db_item(self.path, volume_id))

    def cache_db_volume_types(self, volume_types):
        self.cache_db_items('volume_types', volume_types, 'id')

    def cache_db_volume_type(self, volume_type):
        self.cache_db_items('volume_types', [volume_type], 'id')

    def get_db_volume_types(self):
        return self.get_db_items('volume_types')

    def get_db_volume_type(self, volume_type_id):
        return self.get_db_item('volume_types', volume_type_id)

    def cache_db_snapshots(self, snapshots):
        self.cache_db_items('snapshots', snapshots, 'id')

    def cache_db_snapshot(self, snapshot):
        self.cache_db_items('snapshots', [snapshot], 'id')

    def get_db_snapshots(self):
        return self.get_db_items('snapshots')

    def get_db_snapshot(self, snapshot_id):
        return self.get_db_item('snapshots', snapshot_id)

    def cache_db_backups(self, backups):
        self.cache_db_items('backups', backups, 'id')

    def cache_db_backup(self, backup):
        self.cache_db_items('backups', [backup], 'id')

    def get_db_backups(self):
        return self.get_db_items('backups')

    def get_db_backup(self, backup_id):
        return self.get_db_item('backups', backup_id)

    def best_match_content_type(self):
        """Determine the requested response content-type.

        The result is memoized in the WSGI environ so negotiation happens
        at most once per request.
        """
        if 'cinder.best_content_type' not in self.environ:
            # Calculate the best MIME type
            content_type = None

            # Check URL path suffix (e.g. ".json") first; it overrides the
            # Accept header.
            parts = self.path.rsplit('.', 1)
            if len(parts) > 1:
                possible_type = 'application/' + parts[1]
                if possible_type in SUPPORTED_CONTENT_TYPES:
                    content_type = possible_type

            if not content_type:
                # Fall back to Accept-header negotiation via webob.
                best_matches = self.accept.acceptable_offers(
                    SUPPORTED_CONTENT_TYPES)
                if best_matches:
                    content_type = best_matches[0][0]

            self.environ['cinder.best_content_type'] = (content_type or
                                                        'application/json')

        return self.environ['cinder.best_content_type']

    def get_content_type(self):
        """Determine content type of the request body.

        Does not do any body introspection, only checks header

        :raises exception.InvalidContentType: if the Content-Type header
            names an unsupported type.
        """
        if "Content-Type" not in self.headers:
            return None

        allowed_types = SUPPORTED_CONTENT_TYPES
        content_type = self.content_type

        if content_type not in allowed_types:
            raise exception.InvalidContentType(content_type=content_type)

        return content_type

    def best_match_language(self):
        """Determines best available locale from the Accept-Language header.

        :returns: the best language match or None if the 'Accept-Language'
                  header was not available in the request.
        """
        if not self.accept_language:
            return None

        all_languages = i18n.get_available_languages()

        # NOTE: To decide the default behavior, 'default' is preferred over
        # 'default_tag' because that is return as it is when no match. This is
        # also little tricky that 'default' value cannot be None. At least one
        # of default_tag or default must be supplied as an argument to the
        # method, to define the defaulting behavior. So passing a sentinel
        # value to return None from this function.
        best_match = self.accept_language.lookup(all_languages, default='fake')
        if best_match == 'fake':
            return None
        return best_match

    def set_api_version_request(self, url):
        """Set API version request based on the request header information."""
        if API_VERSION_REQUEST_HEADER in self.headers:
            hdr_string = self.headers[API_VERSION_REQUEST_HEADER]
            # 'latest' is a special keyword which is equivalent to requesting
            # the maximum version of the API supported
            # The header may list versions for several services; pick the
            # entry for the volume service.
            hdr_string_list = hdr_string.split(",")
            volume_version = None
            for hdr in hdr_string_list:
                if VOLUME_SERVICE in hdr:
                    service, volume_version = hdr.split()
                    break
            if not volume_version:
                raise exception.VersionNotFoundForAPIMethod(
                    version=volume_version)
            if volume_version == 'latest':
                self.api_version_request = api_version.max_api_version()
            else:
                self.api_version_request = api_version.APIVersionRequest(
                    volume_version)

            # Check that the version requested is within the global
            # minimum/maximum of supported API versions
            if not self.api_version_request.matches(
                    api_version.min_api_version(),
                    api_version.max_api_version()):
                raise exception.InvalidGlobalAPIVersion(
                    req_ver=self.api_version_request.get_string(),
                    min_ver=api_version.min_api_version().get_string(),
                    max_ver=api_version.max_api_version().get_string())
        else:
            # No header supplied: default to the minimum supported version.
            self.api_version_request = api_version.APIVersionRequest(
                api_version._MIN_API_VERSION)
class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Find and call local method."""
        name = str(kwargs.pop('action', 'default'))
        # Unknown actions fall back to the default() handler.
        handler = getattr(self, name, self.default)
        return handler(*args, **kwargs)

    def default(self, data):
        # Subclasses must provide a fallback handler.
        raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
    """Default request body deserialization."""

    def deserialize(self, datastring, action='default'):
        """Route deserialization through the action dispatcher."""
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        """Fallback handler: produce an empty result."""
        return {}
class JSONDeserializer(TextDeserializer):
    """Deserialize JSON request bodies."""

    def _from_json(self, datastring):
        """Parse JSON, translating parse failures to a cinder exception."""
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            raise exception.MalformedRequestBody(
                reason=_("cannot understand JSON"))

    def default(self, datastring):
        """Fallback handler: wrap the parsed JSON under a 'body' key."""
        return {'body': self._from_json(datastring)}
class DictSerializer(ActionDispatcher):
    """Default request body serialization."""

    def serialize(self, data, action='default'):
        """Route serialization through the action dispatcher."""
        return self.dispatch(data, action=action)

    def default(self, data):
        """Fallback handler: serialize to an empty string."""
        return ""
class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization."""

    def default(self, data):
        # Delegates to oslo.serialization; note this returns bytes.
        return jsonutils.dump_as_bytes(data)
def serializers(**serializers):
    """Attaches serializers to a method.

    This decorator associates a dictionary of serializers with a
    method. Note that the function attributes are directly
    manipulated; the method is not wrapped.
    """
    def _attach(fn):
        # Create the attribute on first use; later decorations merge in.
        if not hasattr(fn, 'wsgi_serializers'):
            fn.wsgi_serializers = {}
        fn.wsgi_serializers.update(serializers)
        return fn
    return _attach
def deserializers(**deserializers):
    """Attaches deserializers to a method.

    This decorator associates a dictionary of deserializers with a
    method. Note that the function attributes are directly
    manipulated; the method is not wrapped.
    """
    def _attach(fn):
        # Create the attribute on first use; later decorations merge in.
        if not hasattr(fn, 'wsgi_deserializers'):
            fn.wsgi_deserializers = {}
        fn.wsgi_deserializers.update(deserializers)
        return fn
    return _attach
def response(code):
    """Attaches response code to a method.

    This decorator associates a response code with a method. Note
    that the function attributes are directly manipulated; the method
    is not wrapped.
    """
    def _attach(fn):
        fn.wsgi_code = code
        return fn
    return _attach
class ResponseObject(object):
    """Bundles a response object with appropriate serializers.

    Object that app methods may return in order to bind alternate
    serializers with a response object to be serialized. Its use is
    optional.
    """

    def __init__(self, obj, code=None, headers=None, **serializers):
        """Binds serializers with an object.

        Takes keyword arguments akin to the @serializer() decorator
        for specifying serializers. Serializers specified will be
        given preference over default serializers or method-specific
        serializers on return.
        """
        self.obj = obj
        self.serializers = serializers
        self._default_code = HTTPStatus.OK
        self._code = code
        # NOTE(review): keys set through __setitem__ are lower-cased, but
        # keys in an initial 'headers' dict are stored as given — presumably
        # callers pass lower-case keys; confirm.
        self._headers = headers or {}
        self.serializer = None
        self.media_type = None

    def __getitem__(self, key):
        """Retrieves a header with the given name."""
        return self._headers[key.lower()]

    def __setitem__(self, key, value):
        """Sets a header with the given name to the given value."""
        self._headers[key.lower()] = value

    def __delitem__(self, key):
        """Deletes the header with the given name."""
        del self._headers[key.lower()]

    def _bind_method_serializers(self, meth_serializers):
        """Binds method serializers with the response object.

        Binds the method serializers with the response object.
        Serializers specified to the constructor will take precedence
        over serializers specified to this method.

        :param meth_serializers: A dictionary with keys mapping to
                                 response types and values containing
                                 serializer objects.
        """
        # We can't use update because that would be the wrong
        # precedence
        for mtype, serializer in meth_serializers.items():
            self.serializers.setdefault(mtype, serializer)

    def get_serializer(self, content_type, default_serializers=None):
        """Returns the serializer for the wrapped object.

        Returns the serializer for the wrapped object subject to the
        indicated content type. If no serializer matching the content
        type is attached, an appropriate serializer drawn from the
        default serializers will be used. If no appropriate
        serializer is available, raises InvalidContentType.
        """
        default_serializers = default_serializers or {}

        try:
            # Normalize e.g. 'application/json' to its short media type.
            mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
            if mtype in self.serializers:
                return mtype, self.serializers[mtype]
            else:
                return mtype, default_serializers[mtype]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def preserialize(self, content_type, default_serializers=None):
        """Prepares the serializer that will be used to serialize.

        Determines the serializer that will be used and prepares an
        instance of it for later call. This allows the serializer to
        be accessed by extensions for, e.g., template extension.
        """
        mtype, serializer = self.get_serializer(content_type,
                                                default_serializers)
        self.media_type = mtype
        self.serializer = serializer()

    def attach(self, **kwargs):
        """Attach slave templates to serializers."""
        if self.media_type in kwargs:
            self.serializer.attach(kwargs[self.media_type])

    def serialize(self, request, content_type, default_serializers=None):
        """Serializes the wrapped object.

        Utility method for serializing the wrapped object. Returns a
        webob.Response object.
        """
        # Use a serializer prepared by preserialize() when available.
        if self.serializer:
            serializer = self.serializer
        else:
            _mtype, _serializer = self.get_serializer(content_type,
                                                      default_serializers)
            serializer = _serializer()

        response = webob.Response()
        response.status_int = self.code
        for hdr, value in self._headers.items():
            response.headers[hdr] = str(value)
        response.headers['Content-Type'] = str(content_type)
        if self.obj is not None:
            body = serializer.serialize(self.obj)
            # webob requires a bytes body.
            if isinstance(body, str):
                body = body.encode('utf-8')
            response.body = body

        return response

    @property
    def code(self):
        """Retrieve the response status."""
        # Explicit code wins over the default (HTTP 200).
        return self._code or self._default_code

    @property
    def headers(self):
        """Retrieve the headers."""
        # Copy so callers cannot mutate internal state.
        return self._headers.copy()
def action_peek_json(body):
    """Determine the action to invoke from a JSON request body.

    The body must decode to a mapping with exactly one top-level key,
    which names the action; that key is returned.
    """
    try:
        decoded = jsonutils.loads(body)
    except ValueError:
        raise exception.MalformedRequestBody(
            reason=_("cannot understand JSON"))

    # Exactly one key is allowed: the action name.
    if len(decoded) != 1:
        raise exception.MalformedRequestBody(reason=_("too many body keys"))

    # Tuple-unpack the single key out of the mapping.
    (action_name,) = decoded.keys()
    return action_name
class ResourceExceptionHandler(object):
    """Context manager to handle Resource exceptions.

    Used when processing exceptions generated by API implementation
    methods (or their extensions).  Converts most exceptions to Fault
    exceptions, with the appropriate logging.
    """

    def __enter__(self):
        return None

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # No exception raised: report it as handled so the with-block
        # exits cleanly.
        if not ex_value:
            return True

        # Each branch below either re-raises or raises a Fault, so a
        # plain if-chain is equivalent to if/elif.  Order matters:
        # Fault must be tested before its webob.exc.HTTPException base.
        if isinstance(ex_value, exception.NotAuthorized):
            raise Fault(webob.exc.HTTPForbidden(explanation=str(ex_value)))
        if isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            raise
        if isinstance(ex_value, (exception.Invalid, exception.NotFound)):
            raise Fault(exception.ConvertedException(
                code=ex_value.code, explanation=str(ex_value)))
        if isinstance(ex_value, TypeError):
            LOG.exception('Exception handling resource:')
            raise Fault(webob.exc.HTTPBadRequest())
        if isinstance(ex_value, Fault):
            LOG.info("Fault thrown: %s", ex_value)
            raise
        if isinstance(ex_value, webob.exc.HTTPException):
            LOG.info("HTTP exception thrown: %s", ex_value)
            raise Fault(ex_value)

        # We didn't handle the exception; let it propagate.
        return False
class Resource(wsgi.Application):
    """WSGI app that handles (de)serialization and controller dispatch.

    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon its controller. All
    controller action methods must accept a 'req' argument, which is the
    incoming wsgi.Request. If the operation is a PUT or POST, the controller
    method must also accept a 'body' argument (the deserialized request body).
    They may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.

    Exceptions derived from webob.exc.HTTPException will be automatically
    wrapped in Fault() to provide API friendly error responses.
    """
    # When True, __call__ parses the requested microversion from the
    # request before dispatching.
    support_api_request_version = True

    def __init__(self, controller, action_peek=None, **deserializers):
        """Initialize Resource.

        :param controller: object that implement methods created by routes lib
        :param action_peek: dictionary of routines for peeking into an action
                            request body to determine the desired action
        :param deserializers: per-media-type deserializer class overrides,
                              merged over the JSON default
        """
        self.controller = controller

        self.extension_controllers = set()

        default_deserializers = dict(json=JSONDeserializer)
        default_deserializers.update(deserializers)

        self.default_deserializers = default_deserializers
        self.default_serializers = dict(json=JSONDictSerializer)

        self.action_peek = dict(json=action_peek_json)
        self.action_peek.update(action_peek or {})

        # Copy over the actions dictionary
        self.wsgi_actions = {}
        if controller:
            self.register_actions(controller)

        # Save a mapping of extensions
        self.wsgi_extensions = {}
        self.wsgi_action_extensions = {}

    def register_actions(self, controller):
        """Registers controller actions with this resource."""
        # wsgi_actions maps action-key -> method name; it is assembled
        # by ControllerMetaclass from @wsgi.action decorations.
        actions = getattr(controller, 'wsgi_actions', {})
        for key, method_name in actions.items():
            self.wsgi_actions[key] = getattr(controller, method_name)

    def register_extensions(self, controller):
        """Registers controller extensions with this resource."""
        self.extension_controllers.add(controller)

        actions = getattr(controller, 'wsgi_actions', {})
        for key, method_name in actions.items():
            self.wsgi_actions[key] = getattr(controller, method_name)

        extensions = getattr(controller, 'wsgi_extensions', [])
        for method_name, action_name in extensions:
            # Look up the extending method
            extension = getattr(controller, method_name)

            if action_name:
                # Extending an action...
                if action_name not in self.wsgi_action_extensions:
                    self.wsgi_action_extensions[action_name] = []
                self.wsgi_action_extensions[action_name].append(extension)
            else:
                # Extending a regular method
                if method_name not in self.wsgi_extensions:
                    self.wsgi_extensions[method_name] = []
                self.wsgi_extensions[method_name].append(extension)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        # NOTE(Vek): Check for get_action_args() override in the
        #            controller
        if hasattr(self.controller, 'get_action_args'):
            return self.controller.get_action_args(request_environment)

        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except (KeyError, IndexError, AttributeError):
            return {}

        # 'controller' and 'format' are routing artifacts, not
        # arguments for the action method.
        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args

    def get_body(self, request):
        """Return (content type, body) for the request.

        Returns (None, '') when the request has no body or no
        Content-Type header.
        """
        if len(request.body) == 0:
            LOG.debug("Empty body provided in request")
            return None, ''

        content_type = request.get_content_type()
        if not content_type:
            LOG.debug("No Content-Type provided in request")
            return None, ''

        return content_type, request.body

    def deserialize(self, meth, content_type, body):
        """Deserialize the body using the method's or default deserializer.

        :raises InvalidContentType: if no deserializer is registered for
                                    the given content type
        """
        meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
        try:
            mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
            if mtype in meth_deserializers:
                deserializer = meth_deserializers[mtype]
            else:
                deserializer = self.default_deserializers[mtype]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

        return deserializer().deserialize(body)

    def pre_process_extensions(self, extensions, request, action_args):
        """Run the pre-processing half of the extensions.

        Returns (response, post): if any extension produced a response
        the dispatch is short-circuited; otherwise 'post' is the
        iterable of extensions to run after the action.
        """
        # List of callables for post-processing extensions
        post = []

        for ext in extensions:
            if inspect.isgeneratorfunction(ext):
                response = None

                # If it's a generator function, the part before the
                # yield is the preprocessing stage
                try:
                    with ResourceExceptionHandler():
                        gen = ext(req=request, **action_args)
                        response = next(gen)
                except Fault as ex:
                    response = ex

                # We had a response...
                if response:
                    return response, []

                # No response, queue up generator for post-processing
                post.append(gen)
            else:
                # Regular functions only perform post-processing
                post.append(ext)

        # Run post-processing in the reverse order
        return None, reversed(post)

    def post_process_extensions(self, extensions, resp_obj, request,
                                action_args):
        """Run the post-processing half of the extensions.

        Returns the first truthy response produced by an extension, or
        None if all extensions ran without producing one.
        """
        for ext in extensions:
            response = None
            if inspect.isgenerator(ext):
                # If it's a generator, run the second half of
                # processing
                try:
                    with ResourceExceptionHandler():
                        response = ext.send(resp_obj)
                except StopIteration:
                    # Normal exit of generator
                    continue
                except Fault as ex:
                    response = ex
            else:
                # Regular functions get post-processing...
                try:
                    with ResourceExceptionHandler():
                        response = ext(req=request, resp_obj=resp_obj,
                                       **action_args)
                except exception.VersionNotFoundForAPIMethod:
                    # If an attached extension (@wsgi.extends) for the
                    # method has no version match its not an error. We
                    # just don't run the extends code
                    continue
                except Fault as ex:
                    response = ex

            # We had a response...
            if response:
                return response

        return None

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        LOG.info("%(method)s %(url)s",
                 {"method": request.method,
                  "url": request.url})

        if self.support_api_request_version:
            # Set the version of the API requested based on the header
            try:
                request.set_api_version_request(request.url)
            except exception.InvalidAPIVersionString as e:
                return Fault(webob.exc.HTTPBadRequest(
                    explanation=str(e)))
            except exception.InvalidGlobalAPIVersion as e:
                return Fault(webob.exc.HTTPNotAcceptable(
                    explanation=str(e)))

        # Identify the action, its arguments, and the requested
        # content type
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)

        # NOTE(sdague): we filter out InvalidContentTypes early so we
        # know everything is good from here on out.
        try:
            content_type, body = self.get_body(request)
            accept = request.best_match_content_type()
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return Fault(webob.exc.HTTPUnsupportedMediaType(explanation=msg))

        # NOTE(Vek): Splitting the function up this way allows for
        #            auditing by external tools that wrap the existing
        #            function.  If we try to audit __call__(), we can
        #            run into troubles due to the @webob.dec.wsgify()
        #            decorator.
        return self._process_stack(request, action, action_args,
                                   content_type, body, accept)

    def _process_stack(self, request, action, action_args,
                       content_type, body, accept):
        """Implement the processing stack."""
        # Get the implementing method
        try:
            meth, extensions = self.get_method(request, action,
                                               content_type, body)
        except (AttributeError, TypeError):
            return Fault(webob.exc.HTTPNotFound())
        except KeyError as ex:
            msg = (_("There is no such action: %s. Verify the request body "
                     "and Content-Type header and try again.") % ex.args[0])
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))

        if body:
            decoded_body = encodeutils.safe_decode(body, errors='ignore')
            msg = ("Action: '%(action)s', calling method: %(meth)s, body: "
                   "%(body)s") % {'action': action,
                                  'body': decoded_body,
                                  'meth': meth.__name__}
            # Passwords in the body must never reach the logs.
            LOG.debug(strutils.mask_password(msg))
        else:
            LOG.debug("Calling method '%(meth)s'",
                      {'meth': meth.__name__})

        # Now, deserialize the request body...
        try:
            if content_type:
                contents = self.deserialize(meth, content_type, body)
            else:
                contents = {}
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))

        # Update the action args
        action_args.update(contents)

        # Reject URLs whose project_id does not match the authenticated
        # context's project.
        project_id = action_args.pop("project_id", None)
        context = request.environ.get('cinder.context')
        if (context and project_id and (project_id != context.project_id)):
            msg = _("Malformed request url")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))

        # Run pre-processing extensions
        response, post = self.pre_process_extensions(extensions,
                                                     request, action_args)

        if not response:
            try:
                with ResourceExceptionHandler():
                    action_result = self.dispatch(meth, request, action_args)
            except Fault as ex:
                response = ex

        if not response:
            # No exceptions; convert action_result into a
            # ResponseObject
            resp_obj = None
            if isinstance(action_result, dict) or action_result is None:
                resp_obj = ResponseObject(action_result)
            elif isinstance(action_result, ResponseObject):
                resp_obj = action_result
            else:
                # The action returned a ready-made response (e.g. a
                # webob.Response); use it as-is.
                response = action_result

            # Run post-processing extensions
            if resp_obj:
                _set_request_id_header(request, resp_obj)
                # Do a preserialize to set up the response object
                serializers = getattr(meth, 'wsgi_serializers', {})
                resp_obj._bind_method_serializers(serializers)
                if hasattr(meth, 'wsgi_code'):
                    resp_obj._default_code = meth.wsgi_code
                resp_obj.preserialize(accept, self.default_serializers)

                # Process post-processing extensions
                response = self.post_process_extensions(post, resp_obj,
                                                        request, action_args)

            if resp_obj and not response:
                response = resp_obj.serialize(request, accept,
                                              self.default_serializers)

        try:
            msg_dict = dict(url=request.url, status=response.status_int)
            msg = "%(url)s returned with HTTP %(status)s"
        except AttributeError as e:
            # A Fault (or similar) without status_int still gets logged.
            msg_dict = dict(url=request.url, e=e)
            msg = "%(url)s returned a fault: %(e)s"

        LOG.info(msg, msg_dict)

        if hasattr(response, 'headers'):
            for hdr, val in response.headers.items():
                # Headers must be utf-8 strings
                val = utils.convert_str(val)
                response.headers[hdr] = val

            # Advertise the served microversion, except on legacy
            # (v1/v2) endpoints.
            if (request.api_version_request and
                    not _is_legacy_endpoint(request)):
                response.headers[API_VERSION_REQUEST_HEADER] = (
                    VOLUME_SERVICE + ' ' +
                    request.api_version_request.get_string())
                response.headers['Vary'] = API_VERSION_REQUEST_HEADER

        return response

    def get_method(self, request, action, content_type, body):
        """Look up the action-specific method and its extensions."""
        # Look up the method
        try:
            if not self.controller:
                meth = getattr(self, action)
            else:
                meth = getattr(self.controller, action)
        except AttributeError as e:
            with excutils.save_and_reraise_exception(e) as ctxt:
                # Only fall through to the wsgi_actions lookup for the
                # well-known overridable action names; anything else is
                # a genuine missing method.
                if (not self.wsgi_actions or action not in ['action',
                                                            'create',
                                                            'delete',
                                                            'update']):
                    LOG.exception('Get method error.')
                else:
                    ctxt.reraise = False
        else:
            return meth, self.wsgi_extensions.get(action, [])

        if action == 'action':
            # OK, it's an action; figure out which action...
            mtype = _MEDIA_TYPE_MAP.get(content_type)
            action_name = self.action_peek[mtype](body)
            LOG.debug("Action body: %s", body)
        else:
            action_name = action

        # Look up the action method
        return (self.wsgi_actions[action_name],
                self.wsgi_action_extensions.get(action_name, []))

    def dispatch(self, method, request, action_args):
        """Dispatch a call to the action-specific method."""
        try:
            return method(req=request, **action_args)
        except exception.VersionNotFoundForAPIMethod:
            # We deliberately don't return any message information
            # about the exception to the user so it looks as if
            # the method is simply not implemented.
            return Fault(webob.exc.HTTPNotFound())
def action(name):
    """Tag a controller method as the handler for a named action.

    The supplied name is the key looked up in the request body.  It is
    also overloaded to let extensions provide non-extending definitions
    of create and delete operations.
    """
    def _tag(func):
        # register_actions()/ControllerMetaclass pick this attribute up.
        func.wsgi_action = name
        return func
    return _tag
def extends(*args, **kwargs):
    """Indicate a function extends an operation.

    Can be used as either::

        @extends
        def index(...):
            pass

    or as::

        @extends(action='resize')
        def _action_resize(...):
            pass
    """
    def _mark(func):
        # Record the name of the method (and optional action) extended.
        func.wsgi_extends = (func.__name__, kwargs.get('action'))
        return func

    if args:
        # Bare usage: @extends -- args holds the decorated function.
        return _mark(*args)

    # Parameterized usage: @extends(action=...) -- return the decorator.
    return _mark
class ControllerMetaclass(type):
    """Controller metaclass.

    This metaclass automates the task of assembling a dictionary
    mapping action keys to method names.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Adds the wsgi_actions dictionary to the class."""
        # NOTE(review): 'Controller' is defined later in this module;
        # by the time any subclass triggers this branch the name is
        # resolvable at runtime.

        # Find all actions
        actions = {}
        extensions = []

        # NOTE(geguileo): We'll keep a list of versioned methods that have been
        # added by the new metaclass (dictionary in attribute VER_METHOD_ATTR
        # on Controller class) and all the versioned methods from the different
        # base classes so we can consolidate them.
        versioned_methods = []

        # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
        # between API controller class creations. This allows us
        # to use a class decorator on the API methods that doesn't
        # require naming explicitly what method is being versioned as
        # it can be implicit based on the method decorated. It is a bit
        # ugly.
        if bases != (object,) and VER_METHOD_ATTR in vars(Controller):
            # Get the versioned methods that this metaclass creation has added
            # to the Controller class
            versioned_methods.append(getattr(Controller, VER_METHOD_ATTR))
            # Remove them so next metaclass has a clean start
            delattr(Controller, VER_METHOD_ATTR)

        # start with wsgi actions from base classes
        for base in bases:
            actions.update(getattr(base, 'wsgi_actions', {}))
            # Get the versioned methods that this base has
            if VER_METHOD_ATTR in vars(base):
                versioned_methods.append(getattr(base, VER_METHOD_ATTR))

        # Collect @wsgi.action and @wsgi.extends decorations from the
        # class body being created.
        for key, value in cls_dict.items():
            if not isinstance(value, abc.Callable):
                continue
            if getattr(value, 'wsgi_action', None):
                actions[value.wsgi_action] = key
            elif getattr(value, 'wsgi_extends', None):
                extensions.append(value.wsgi_extends)

        # Add the actions and extensions to the class dict
        cls_dict['wsgi_actions'] = actions
        cls_dict['wsgi_extensions'] = extensions
        if versioned_methods:
            cls_dict[VER_METHOD_ATTR] = mcs.consolidate_vers(versioned_methods)

        return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
                                                       cls_dict)

    @staticmethod
    def consolidate_vers(versioned_methods):
        """Consolidates a list of versioned methods dictionaries.

        Merges all dicts into the first one, extending each method's
        list and keeping it sorted newest-version-first.
        """
        if not versioned_methods:
            return {}
        result = versioned_methods.pop(0)
        for base_methods in versioned_methods:
            for name, methods in base_methods.items():
                method_list = result.setdefault(name, [])
                method_list.extend(methods)
                # Keep sorted descending so the newest matching
                # version wins during lookup.
                method_list.sort(reverse=True)
        return result
class Controller(object, metaclass=ControllerMetaclass):
    """Default controller."""

    # Subclasses set this to a view-builder class to have an instance
    # created automatically in __init__.
    _view_builder_class = None

    def __init__(self, view_builder=None):
        """Initialize controller with a view builder instance."""
        if view_builder:
            self._view_builder = view_builder
        elif self._view_builder_class:
            self._view_builder = self._view_builder_class()
        else:
            self._view_builder = None

    def __getattribute__(self, key):
        """Return attributes, routing versioned methods through a selector.

        If 'key' names a versioned method, a wrapper is returned that
        picks the implementation matching the request's microversion.
        """

        def version_select(*args, **kwargs):
            """Select and call the matching version of the specified method.

            Look for the method which matches the name supplied and version
            constraints and calls it with the supplied arguments.

            :returns: Returns the result of the method called
            :raises VersionNotFoundForAPIMethod: if there is no method which
                 matches the name and version constraints
            """

            # The first arg to all versioned methods is always the request
            # object. The version for the request is attached to the
            # request object
            if len(args) == 0:
                version_request = kwargs['req'].api_version_request
            else:
                version_request = args[0].api_version_request

            func_list = self.versioned_methods[key]
            for func in func_list:
                if version_request.matches_versioned_method(func):
                    # Update the version_select wrapper function so
                    # other decorator attributes like wsgi.response
                    # are still respected.
                    functools.update_wrapper(version_select, func.func)
                    return func.func(self, *args, **kwargs)

            # No version match
            raise exception.VersionNotFoundForAPIMethod(
                version=version_request)

        try:
            version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
        except AttributeError:
            # No versioning on this class
            return object.__getattribute__(self, key)

        if (version_meth_dict and key in
                object.__getattribute__(self, VER_METHOD_ATTR)):
            return version_select

        # NOTE(review): dashes are mapped to underscores here --
        # presumably so dashed action names resolve to valid Python
        # method names; confirm against the routing callers.
        return object.__getattribute__(self, key.replace('-', '_'))

    # NOTE(cyeoh): This decorator MUST appear first (the outermost
    # decorator) on an API method for it to work correctly
    @classmethod
    def api_version(cls, min_ver, max_ver=None, experimental=False):
        """Decorator for versioning API methods.

        Add the decorator to any method which takes a request object
        as the first parameter and belongs to a class which inherits from
        wsgi.Controller.

        :param min_ver: string representing minimum version
        :param max_ver: optional string representing maximum version
        :param experimental: flag stored on the resulting VersionedMethod
        """

        def decorator(f):
            obj_min_ver = api_version.APIVersionRequest(min_ver)
            if max_ver:
                obj_max_ver = api_version.APIVersionRequest(max_ver)
            else:
                # No upper bound: open-ended version range.
                obj_max_ver = api_version.APIVersionRequest()

            # Add to list of versioned methods registered
            func_name = f.__name__
            new_func = versioned_method.VersionedMethod(
                func_name, obj_min_ver, obj_max_ver, experimental, f)

            func_dict = getattr(cls, VER_METHOD_ATTR, {})
            if not func_dict:
                setattr(cls, VER_METHOD_ATTR, func_dict)

            func_list = func_dict.get(func_name, [])
            if not func_list:
                func_dict[func_name] = func_list
            func_list.append(new_func)
            # Ensure the list is sorted by minimum version (reversed)
            # so later when we work through the list in order we find
            # the method which has the latest version which supports
            # the version requested.
            # TODO(cyeoh): Add check to ensure that there are no overlapping
            # ranges of valid versions as that is ambiguous
            func_list.sort(reverse=True)

            # NOTE(geguileo): To avoid PEP8 errors when defining multiple
            # microversions of the same method in the same class we add the
            # api_version decorator to the function so it can be used instead,
            # thus preventing method redefinition errors.
            f.api_version = cls.api_version

            return f

        return decorator

    @staticmethod
    def clean_name_and_description(body):
        """Strip whitespace from name and description fields."""
        for attribute in ['name', 'description',
                          'display_name', 'display_description']:
            value = body.get(attribute)
            if value is not None:
                if isinstance(value, str):
                    body[attribute] = value.strip()

    @staticmethod
    def validate_string_length(value, entity_name, min_length=0,
                               max_length=None, remove_whitespaces=False):
        """Check the length of specified string.

        :param value: the value of the string
        :param entity_name: the name of the string
        :param min_length: the min_length of the string
        :param max_length: the max_length of the string
        :param remove_whitespaces: True if trimming whitespaces is needed
                                   else False
        :raises webob.exc.HTTPBadRequest: if the length check fails
        """
        if isinstance(value, str) and remove_whitespaces:
            value = value.strip()
        try:
            utils.check_string_length(value, entity_name,
                                      min_length=min_length,
                                      max_length=max_length)
        except exception.InvalidInput as error:
            raise webob.exc.HTTPBadRequest(explanation=error.msg)
class APIMapper(routes.Mapper):
    """routes.Mapper with cinder-specific matching and route creation."""

    def routematch(self, url=None, environ=None):
        # routes.Mapper does not match the empty string; handle it by
        # calling the internal matcher directly.
        if url == "":
            matched = self._match("", environ)
            return matched[0], matched[1]
        return routes.Mapper.routematch(self, url, environ)

    def connect(self, *args, **kwargs):
        # NOTE(inhye): Default the format part of a route to only accept json
        # so it doesn't eat all characters after a '.' in the url.
        requirements = kwargs.setdefault('requirements', {})
        if not requirements.get('format'):
            requirements['format'] = 'json'
        return routes.Mapper.connect(self, *args, **kwargs)

    def create_route(self, path, method, controller, action):
        # NOTE: project_id parameter is only valid if its hex or hex + dashes
        # (note, integers are a subset of this). This is required to handle
        # our overlapping routes issues.
        prefixed = '/{project_id:%s}%s' % (CONF.project_id_regex, path)
        # Register the project-id-prefixed route first, then the bare
        # path, exactly as before.
        for route_path in (prefixed, path):
            self.connect(route_path,
                         conditions={"method": [method]},
                         controller=controller,
                         action=action)
class Fault(webob.exc.HTTPException):
    """Wrap webob.exc.HTTPException to provide API friendly response."""

    # Maps HTTP status codes to the fault-name key used in the body.
    _fault_names = {HTTPStatus.BAD_REQUEST: "badRequest",
                    HTTPStatus.UNAUTHORIZED: "unauthorized",
                    HTTPStatus.FORBIDDEN: "forbidden",
                    HTTPStatus.NOT_FOUND: "itemNotFound",
                    HTTPStatus.METHOD_NOT_ALLOWED: "badMethod",
                    HTTPStatus.CONFLICT: "conflictingRequest",
                    HTTPStatus.REQUEST_ENTITY_TOO_LARGE: "overLimit",
                    HTTPStatus.UNSUPPORTED_MEDIA_TYPE: "badMediaType",
                    HTTPStatus.NOT_IMPLEMENTED: "notImplemented",
                    HTTPStatus.SERVICE_UNAVAILABLE: "serviceUnavailable"}

    def __init__(self, exception):
        """Create a Fault for the given webob.exc.exception."""
        self.wrapped_exc = exception
        self.status_int = exception.status_int

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Generate a WSGI response based on the exception passed to ctor."""
        # Replace the body with fault details.
        locale = req.best_match_language()
        code = self.wrapped_exc.status_int
        fault_name = self._fault_names.get(code, "computeFault")
        fault_data = {
            fault_name: {
                'code': code,
                'message': i18n.translate(self.wrapped_exc.explanation,
                                          locale),
            },
        }
        if code == HTTPStatus.REQUEST_ENTITY_TOO_LARGE:
            retry = self.wrapped_exc.headers.get('Retry-After', None)
            if retry:
                fault_data[fault_name]['retryAfter'] = retry

        # Advertise the served microversion, except on legacy endpoints.
        if req.api_version_request and not _is_legacy_endpoint(req):
            self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = (
                VOLUME_SERVICE + ' ' + req.api_version_request.get_string())
            self.wrapped_exc.headers['Vary'] = API_VERSION_REQUEST_HEADER

        content_type = req.best_match_content_type()
        serializer = {'application/json': JSONDictSerializer()}[content_type]

        body = serializer.serialize(fault_data)
        if isinstance(body, str):
            body = body.encode('utf-8')
        self.wrapped_exc.body = body
        self.wrapped_exc.content_type = content_type
        _set_request_id_header(req, self.wrapped_exc.headers)

        return self.wrapped_exc

    def __str__(self):
        return str(self.wrapped_exc)
def _set_request_id_header(req, headers):
context = req.environ.get('cinder.context')
if context:
headers['x-compute-request-id'] = context.request_id
def _is_legacy_endpoint(request):
version_str = request.api_version_request.get_string()
return '1.0' in version_str or '2.0' in version_str
class OverLimitFault(webob.exc.HTTPException):
    """Rate-limited request response."""

    def __init__(self, message, details, retry_time):
        """Initialize new `OverLimitFault` with relevant information."""
        hdrs = OverLimitFault._retry_after(retry_time)
        self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
        self.content = {
            "overLimitFault": {
                "code": self.wrapped_exc.status_int,
                "message": message,
                "details": details,
            },
        }

    @staticmethod
    def _retry_after(retry_time):
        # Seconds until retry_time, floored at zero.
        delay = max(int(math.ceil(retry_time - time.time())), 0)
        return {'Retry-After': '%d' % delay}

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """Serializes the wrapped exception conforming to our error format."""
        content_type = request.best_match_content_type()
        locale = request.best_match_language()

        fault = self.content['overLimitFault']
        fault['message'] = i18n.translate(fault['message'], locale)
        fault['details'] = i18n.translate(fault['details'], locale)

        serializer = {'application/json': JSONDictSerializer()}[content_type]
        self.wrapped_exc.body = serializer.serialize(self.content)

        return self.wrapped_exc
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/ 0000775 0000000 0000000 00000000000 15131732575 0022030 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0024127 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/admin_actions.py 0000664 0000000 0000000 00000011454 15131732575 0025217 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 admin_actions API.
"""
import copy
from cinder.api.validation import parameter_types
# TODO: Restrict the value to 'null' in a future API version
_force_delete = {
    'type': 'object',
    'properties': {
        # Empty subschema: the value is unconstrained, only the key matters.
        'os-force_delete': {},
    },
    'required': ['os-force_delete'],
    'additionalProperties': False,
}

# The same request shape is shared by all three force-delete actions;
# deep copies keep the schemas independently mutable.
force_delete_volume = copy.deepcopy(_force_delete)
force_delete_snapshot = copy.deepcopy(_force_delete)
force_delete_backup = copy.deepcopy(_force_delete)

# Body schema for the volume 'os-reset_status' admin action.
reset_status_volume = {
    'type': 'object',
    'properties': {
        'os-reset_status': {
            'type': 'object',
            # Custom 'format' names -- presumably resolved by
            # project-specific jsonschema format checkers; confirm in
            # cinder.api.validation.
            'format': 'validate_volume_reset_body',
            'properties': {
                'status': {'type': ['string', 'null'],
                           'format': 'volume_status'},
                'attach_status': {'type': ['string', 'null'],
                                  'format': 'volume_attach_status'},
                'migration_status': {'type': ['string', 'null'],
                                     'format': 'volume_migration_status'},
            },
            'additionalProperties': False,
        },
    },
    'required': ['os-reset_status'],
    'additionalProperties': False,
}
# Body schema for the 'os-force_detach' admin action.
force_detach = {
    'type': 'object',
    'properties': {
        'os-force_detach': {
            'type': 'object',
            'properties': {
                'connector': {'type': ['string', 'object', 'null']},
                'attachment_id': {'type': ['string', 'null']}
            },
            'additionalProperties': False,
        },
    },
    'required': ['os-force_detach'],
    'additionalProperties': False,
}

# Body schema for the 'os-migrate_volume' admin action; 'host' is mandatory.
migrate_volume = {
    'type': 'object',
    'properties': {
        'os-migrate_volume': {
            'type': 'object',
            'properties': {
                'host': {'type': 'string', 'maxLength': 255},
                'force_host_copy': parameter_types.boolean,
                'lock_volume': parameter_types.boolean,
            },
            'required': ['host'],
            'additionalProperties': False,
        },
    },
    'required': ['os-migrate_volume'],
    'additionalProperties': False,
}

# Variant used from API version 3.16 (per the _v316 suffix): 'host'
# may be null and is no longer required, and a 'cluster' may be given.
migrate_volume_v316 = {
    'type': 'object',
    'properties': {
        'os-migrate_volume': {
            'type': 'object',
            'properties': {
                'host': {'type': ['string', 'null'],
                         'maxLength': 255},
                'force_host_copy': parameter_types.boolean,
                'lock_volume': parameter_types.boolean,
                'cluster': parameter_types.name_allow_zero_min_length,
            },
            'additionalProperties': False,
        },
    },
    'required': ['os-migrate_volume'],
    'additionalProperties': False,
}
# Body schema for the 'os-migrate_volume_completion' admin action.
migrate_volume_completion = {
    'type': 'object',
    'properties': {
        'os-migrate_volume_completion': {
            'type': 'object',
            'properties': {
                'new_volume': parameter_types.uuid,
                # Loose typing kept for backward compatibility.
                'error': {'type': ['string', 'null', 'boolean']},
            },
            'required': ['new_volume'],
            'additionalProperties': False,
        },
    },
    'required': ['os-migrate_volume_completion'],
    'additionalProperties': False,
}

# Body schema for the 'os-extend_volume_completion' admin action.
extend_volume_completion = {
    'type': 'object',
    'properties': {
        'os-extend_volume_completion': {
            'type': 'object',
            'properties': {
                'error': {'type': ['string', 'null', 'boolean']},
            },
            'additionalProperties': False,
        },
    },
    'required': ['os-extend_volume_completion'],
    'additionalProperties': False,
}

# Body schema for the backup 'os-reset_status' admin action.
reset_status_backup = {
    'type': 'object',
    'properties': {
        'os-reset_status': {
            'type': 'object',
            'properties': {
                'status': {'type': 'string',
                           'format': 'backup_status'},
            },
            'required': ['status'],
            'additionalProperties': False,
        },
    },
    'required': ['os-reset_status'],
    'additionalProperties': False,
}

# The snapshot variant only differs in the status format checker name.
reset_status_snapshot = copy.deepcopy(reset_status_backup)
reset_status_snapshot['properties']['os-reset_status'][
    'properties']['status']['format'] = 'snapshot_status'
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/attachments.py 0000664 0000000 0000000 00000003760 15131732575 0024723 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Attachments API.
"""
import copy
from cinder.api.validation import parameter_types
# Request body of attachment create: 'volume_uuid' is the only mandatory
# member; 'connector' may be an object or null.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'attachment': {
            'type': 'object',
            'properties': {
                'instance_uuid': parameter_types.uuid,
                'connector': {'type': ['object', 'null']},
                'volume_uuid': parameter_types.uuid,
            },
            'required': ['volume_uuid'],
            'additionalProperties': False,
        },
    },
    'required': ['attachment'],
    'additionalProperties': False,
}
# Request body of attachment update: a non-empty 'connector' object is
# mandatory.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
update = {
    'type': 'object',
    'properties': {
        'attachment': {
            'type': 'object',
            'properties': {
                'connector': {'type': 'object', 'minProperties': 1},
            },
            'required': ['connector'],
            'additionalProperties': False,
        },
    },
    'required': ['attachment'],
    'additionalProperties': False,
}
# Microversion 3.54 adds an optional attach "mode": read-write ('rw') or
# read-only ('ro').
create_v354 = copy.deepcopy(create)
_v354_attachment_props = create_v354['properties']['attachment']['properties']
_v354_attachment_props['mode'] = {'type': 'string', 'enum': ['rw', 'ro']}
# TODO: Restrict the value to 'null' in a future API version
# The os-complete action takes no arguments, so any value is currently
# accepted for the action key itself.
complete = {
    'type': 'object',
    'required': ['os-complete'],
    'additionalProperties': False,
    'properties': {'os-complete': {}},
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/backups.py 0000664 0000000 0000000 00000006403 15131732575 0024035 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Backups API.
"""
import copy
from cinder.api.validation import parameter_types
# Request body of backup create: 'volume_id' is the only mandatory
# member.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'backup': {
            'type': 'object',
            'properties': {
                'volume_id': parameter_types.uuid,
                'container': parameter_types.container,
                'description': parameter_types.description,
                'incremental': parameter_types.boolean,
                'force': parameter_types.boolean,
                'name': parameter_types.name_allow_zero_min_length,
                'snapshot_id': parameter_types.uuid_allow_null,
            },
            'required': ['volume_id'],
            'additionalProperties': False,
        },
    },
    'required': ['backup'],
    'additionalProperties': False,
}
# Microversion 3.43 adds optional backup metadata; 3.51 additionally
# allows requesting an availability zone.
create_backup_v343 = copy.deepcopy(create)
_v343_backup_props = create_backup_v343['properties']['backup']['properties']
_v343_backup_props['metadata'] = parameter_types.metadata_allows_null
create_backup_v351 = copy.deepcopy(create_backup_v343)
_v351_backup_props = create_backup_v351['properties']['backup']['properties']
_v351_backup_props['availability_zone'] = parameter_types.nullable_string
# Request body of backup update: 'backup' may be null; name/description
# are both optional.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
update = {
    'type': 'object',
    'properties': {
        'backup': {
            'type': ['object', 'null'],
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
            },
            'additionalProperties': False,
        },
    },
    'required': ['backup'],
    'additionalProperties': False,
}
# Microversion 3.43 also allows updating the backup metadata.
update_backup_v343 = copy.deepcopy(update)
_v343_update_props = update_backup_v343['properties']['backup']['properties']
_v343_update_props['metadata'] = parameter_types.extra_specs
# Request body of backup restore: 'restore' may be null; the target
# volume id and name are both optional.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
restore = {
    'type': 'object',
    'properties': {
        'restore': {
            'type': ['object', 'null'],
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'volume_id': parameter_types.uuid_allow_null
            },
            'additionalProperties': False,
        },
    },
    'required': ['restore'],
    'additionalProperties': False,
}
# Request body of backup import: both the service name and the opaque
# backup url are mandatory.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
import_record = {
    'type': 'object',
    'properties': {
        'backup-record': {
            'type': 'object',
            'properties': {
                'backup_service': parameter_types.backup_service,
                'backup_url': parameter_types.backup_url
            },
            'required': ['backup_service', 'backup_url'],
            'additionalProperties': False,
        },
    },
    'required': ['backup-record'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/cgsnapshots.py 0000664 0000000 0000000 00000002344 15131732575 0024741 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(stephenfin): We'd like to set additionalProperties=False but we won't
# because the API is deprecated
# Request body of cgsnapshot create: only 'consistencygroup_id' is
# mandatory; extra members are tolerated because the API is deprecated.
create = {
    'type': 'object',
    'required': ['cgsnapshot'],
    'additionalProperties': True,
    'properties': {
        'cgsnapshot': {
            'type': 'object',
            'required': ['consistencygroup_id'],
            'additionalProperties': True,
            'properties': {
                'consistencygroup_id': {'type': 'string'},
                'name': {'type': 'string'},
                'description': {'type': 'string'},
            },
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/clusters.py 0000664 0000000 0000000 00000002364 15131732575 0024253 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Clusters API.
"""
from cinder.api.validation import parameter_types
# Body of "disable cluster": the cluster 'name' is mandatory; an
# optional human-readable 'disabled_reason' may be recorded.
disable_cluster = {
    'type': 'object',
    'required': ['name'],
    'additionalProperties': False,
    'properties': {
        'name': parameter_types.name,
        'binary': parameter_types.nullable_string,
        'disabled_reason': {
            'type': ['string', 'null'], 'format': 'disabled_reason'
        },
    },
}
# Body of "enable cluster": the same selectors, without a reason.
enable_cluster = {
    'type': 'object',
    'required': ['name'],
    'additionalProperties': False,
    'properties': {
        'name': parameter_types.name,
        'binary': parameter_types.nullable_string,
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/consistencygroups.py 0000664 0000000 0000000 00000006732 15131732575 0026213 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api.validation import parameter_types
# NOTE: These schemas are very loose but they won't be fixed as the API itself
# is deprecated.
# Request body of consistency group create; deliberately loose since the
# API is deprecated: 'volume_types' and 'availability_zone' accept
# anything and are parsed by the controller.
create = {
    'type': 'object',
    'required': ['consistencygroup'],
    'additionalProperties': True,
    'properties': {
        'consistencygroup': {
            'type': 'object',
            'required': ['volume_types'],
            'additionalProperties': True,
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
                'volume_types': {},
                'availability_zone': {},
            },
        },
    },
}
# Request body of "create consistency group from source"; the source may
# be a cgsnapshot or another consistency group, neither key is enforced
# here (deprecated API, loose schema).
create_from_src = {
    'type': 'object',
    'required': ['consistencygroup-from-src'],
    'additionalProperties': True,
    'properties': {
        'consistencygroup-from-src': {
            'type': 'object',
            'required': [],
            'additionalProperties': True,
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
                'cgsnapshot_id': {'type': 'string'},
                'source_cgid': {'type': 'string'},
            },
        },
    },
}
# NOTE: This one is weird. Effectively, we want to make the body optional
# but because the code is using a false'y check rather than an explicit
# 'is None' check, we have allowed empty bodies. As such, the body can
# either be an object with a required key, an empty body, or null.
# TODO: Disallow the empty body case.
_delete_with_body = {
    'type': 'object',
    'required': ['consistencygroup'],
    'additionalProperties': True,
    'properties': {
        'consistencygroup': {
            'type': 'object',
            'required': [],
            'additionalProperties': True,
            'properties': {
                'force': parameter_types.boolean,
            },
        },
    },
}
delete = {
    'oneOf': [
        _delete_with_body,
        {
            'type': 'object',
            'properties': {},
            'additionalProperties': False,
        },
        {
            'type': 'null',
        },
    ],
}
# Request body of consistency group update; 'add_volumes' and
# 'remove_volumes' are comma-separated id strings (or null), parsed by
# the controller (deprecated API, loose schema).
update = {
    'type': 'object',
    'required': ['consistencygroup'],
    'additionalProperties': True,
    'properties': {
        'consistencygroup': {
            'type': 'object',
            'required': [],
            'additionalProperties': True,
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
                'add_volumes': {
                    'type': ['string', 'null'],
                },
                'remove_volumes': {
                    'type': ['string', 'null'],
                },
            },
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/default_types.py 0000664 0000000 0000000 00000002020 15131732575 0025244 0 ustar 00root root 0000000 0000000 # Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Default types API.
"""
from cinder.api.validation import parameter_types
# Body for setting a project's default volume type: a 'default_type'
# object wrapping the mandatory 'volume_type' name.
# NOTE(review): unlike most schemas in this tree there is no top-level
# 'required' or 'additionalProperties' here, so an empty body or extra
# top-level keys pass validation -- confirm whether that is intentional.
create_or_update = {
    'type': 'object',
    'properties': {
        'default_type': {
            'type': 'object',
            'required': ['volume_type'],
            'additionalProperties': False,
            'properties': {
                'volume_type': parameter_types.name,
            },
        },
    }
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/group_snapshots.py 0000664 0000000 0000000 00000003210 15131732575 0025634 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Group Snapshots API.
"""
from cinder.api.validation import parameter_types
# Request body of group snapshot create: 'group_id' is mandatory.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'group_snapshot': {
            'type': 'object',
            'properties': {
                'group_id': parameter_types.uuid,
                'name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
            },
            'required': ['group_id'],
            'additionalProperties': False,
        },
    },
    'required': ['group_snapshot'],
    'additionalProperties': False,
}
# Request body of group snapshot reset_status: a mandatory 'status'
# validated against the group-snapshot status values.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
reset_status = {
    'type': 'object',
    'properties': {
        'reset_status': {
            'type': 'object',
            'properties': {
                'status': parameter_types.group_snapshot_status,
            },
            'required': ['status'],
            'additionalProperties': False,
        },
    },
    'required': ['reset_status'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/group_specs.py 0000664 0000000 0000000 00000002345 15131732575 0024737 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# Free-form group specs: keys are 1-255 chars drawn from letters, digits
# and "-_:." (no spaces); values are strings up to 255 chars, or null.
group_specs_with_no_spaces_key_and_value_null = {
    'type': 'object',
    'additionalProperties': False,
    'patternProperties': {
        '^[a-zA-Z0-9-_:.]{1,255}$': {
            'type': ['string', 'null'], 'maxLength': 255
        }
    },
}
# Request body of group specs create; the specs themselves are validated
# by the shared pattern schema above.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'group_specs': group_specs_with_no_spaces_key_and_value_null,
    },
    'required': ['group_specs'],
    'additionalProperties': False,
}
# Updating a single spec: exactly one key/value pair obeying the same
# key/value constraints as creation.
update = copy.deepcopy(group_specs_with_no_spaces_key_and_value_null)
update['minProperties'] = 1
update['maxProperties'] = 1
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/group_types.py 0000664 0000000 0000000 00000003363 15131732575 0024767 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Group types API.
"""
from cinder.api.validation import parameter_types
# Request body of group type create: 'name' is mandatory.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'group_type': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'description': parameter_types.description,
                'is_public': parameter_types.boolean,
                'group_specs': parameter_types.metadata_allows_null,
            },
            'required': ['name'],
            'additionalProperties': False,
        },
    },
    'required': ['group_type'],
    'additionalProperties': False,
}
# Request body of group type update: all members optional.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
update = {
    'type': 'object',
    'properties': {
        'group_type': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
                'is_public': parameter_types.boolean,
            },
            'additionalProperties': False,
        },
    },
    'required': ['group_type'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/groups.py 0000664 0000000 0000000 00000011534 15131732575 0023725 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Generic Volume Groups API.
"""
from cinder.api.validation import parameter_types
# Request body of group create: a group type and a non-empty list of
# unique volume type names are mandatory.
create = {
    'type': 'object',
    'required': ['group'],
    'additionalProperties': False,
    'properties': {
        'group': {
            'type': 'object',
            'required': ['group_type', 'volume_types'],
            'additionalProperties': False,
            'properties': {
                'description': parameter_types.description,
                'group_type': {
                    'type': 'string', 'format': 'group_type'
                },
                'name': parameter_types.name_allow_zero_min_length,
                'volume_types': {
                    'type': 'array', 'minItems': 1,
                    'uniqueItems': True,
                    'items': {
                        'type': 'string', 'maxLength': 255,
                    },
                },
                'availability_zone': {
                    'type': ['string', 'null'], 'format': 'availability_zone'
                },
            },
        },
    },
}
# Request body of "create group from source": exactly one source must be
# given -- either an existing group or a group snapshot.
create_from_source = {
    'type': 'object',
    'required': ['create-from-src'],
    'additionalProperties': False,
    'properties': {
        'create-from-src': {
            'type': 'object',
            'oneOf': [
                {'required': ['group_snapshot_id']},
                {'required': ['source_group_id']}
            ],
            'additionalProperties': False,
            'properties': {
                'description': parameter_types.description,
                'name': parameter_types.name_allow_zero_min_length,
                'source_group_id': parameter_types.uuid,
                'group_snapshot_id': parameter_types.uuid,
            },
        },
    },
}
# Request body of group delete: 'delete-volumes' optionally requests
# that the volumes in the group be deleted as well.
delete = {
    'type': 'object',
    'required': ['delete'],
    'additionalProperties': False,
    'properties': {
        'delete': {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'delete-volumes': parameter_types.boolean,
            },
        },
    },
}
# Request body of group reset_status: a mandatory 'status' checked by
# the custom 'group_status' format checker.
reset_status = {
    'type': 'object',
    'required': ['reset_status'],
    'additionalProperties': False,
    'properties': {
        'reset_status': {
            'type': 'object',
            'required': ['status'],
            'additionalProperties': False,
            'properties': {
                'status': {
                    'type': 'string', 'format': 'group_status'
                },
            },
        },
    },
}
# Request body of group update: at least one of the four updatable
# members must be supplied.
update = {
    'type': 'object',
    'required': ['group'],
    'additionalProperties': False,
    'properties': {
        'group': {
            'type': 'object',
            'anyOf': [
                {'required': ['name']},
                {'required': ['description']},
                {'required': ['add_volumes']},
                {'required': ['remove_volumes']},
            ],
            'additionalProperties': False,
            'properties': {
                'description': parameter_types.description,
                'name': parameter_types.name_allow_zero_min_length,
                'add_volumes': parameter_types.description,
                'remove_volumes': parameter_types.description,
            },
        },
    },
}
# Request body of group failover_replication: both members optional.
failover_replication = {
    'type': 'object',
    'required': ['failover_replication'],
    'additionalProperties': False,
    'properties': {
        'failover_replication': {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'allow_attached_volume': parameter_types.boolean,
                'secondary_backend_id': parameter_types.nullable_string,
            },
        },
    },
}
# The three replication actions below take no arguments; the action key
# itself must map to an object.
list_replication = {
    'type': 'object',
    'required': ['list_replication_targets'],
    'additionalProperties': False,
    'properties': {
        'list_replication_targets': {'type': 'object'}
    },
}
enable_replication = {
    'type': 'object',
    'required': ['enable_replication'],
    'additionalProperties': False,
    'properties': {
        'enable_replication': {'type': 'object'}
    },
}
disable_replication = {
    'type': 'object',
    'required': ['disable_replication'],
    'additionalProperties': False,
    'properties': {
        'disable_replication': {'type': 'object'}
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/hosts.py 0000664 0000000 0000000 00000001673 15131732575 0023551 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE: These schemas are very loose but they won't be fixed as the API itself
# is deprecated.
# Request body of host update: 'status' toggles the host on or off.
update = {
    'type': 'object',
    'required': [],
    # we allow additional properties because status can be in upper or
    # mixed case also
    'additionalProperties': True,
    'properties': {
        'status': {
            'type': 'string',
            'enum': ['enable', 'disable'],
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/qos_specs.py 0000664 0000000 0000000 00000003202 15131732575 0024376 0 ustar 00root root 0000000 0000000 # Copyright 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cinder.api.validation import parameter_types
# Request body of QoS spec create: only 'name' is validated here; the
# remaining members are the spec's key/value pairs, hence
# additionalProperties is left True on the inner object.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'qos_specs': {
            'type': 'object',
            'properties': {
                'name': {
                    'type': 'string',
                    'format': 'name_skip_leading_trailing_spaces'
                },
            },
            'required': ['name'],
            'additionalProperties': True,
        },
    },
    'required': ['qos_specs'],
    'additionalProperties': False,
}
# Request body for setting QoS spec keys; the key/value constraints live
# in the shared parameter type.
set = {
    'type': 'object',
    'required': ['qos_specs'],
    'additionalProperties': False,
    'properties': {
        'qos_specs': parameter_types.extra_specs_with_null
    },
}
# Request body for unsetting QoS spec keys: a list of unique key names.
unset = {
    'type': 'object',
    'required': ['keys'],
    'additionalProperties': False,
    'properties': {
        'keys': {
            'type': 'array',
            'uniqueItems': True,
            'items': {
                'type': 'string', 'minLength': 1, 'maxLength': 255
            },
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/quota_classes.py 0000664 0000000 0000000 00000001672 15131732575 0025256 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Quota classes API.
"""
from cinder.api.validation import parameter_types
# Request body of quota class update.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
update_quota_class = {
    'type': 'object',
    'properties': {
        'quota_class_set': parameter_types.quota_class_set
    },
    'required': ['quota_class_set'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/quotas.py 0000664 0000000 0000000 00000001632 15131732575 0023720 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Quotas API.
"""
# Request body of quota update: a non-empty 'quota_set' object checked
# by the custom 'quota_set' format checker.
update = {
    'type': 'object',
    'required': ['quota_set'],
    'additionalProperties': False,
    'properties': {
        'quota_set': {
            'type': 'object',
            'minProperties': 1,
            'format': 'quota_set',
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/scheduler_hints.py 0000664 0000000 0000000 00000006017 15131732575 0025571 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 scheduler_hints API.
"""
from cinder.api.validation import parameter_types
# A set of volume uuids may be passed either as a single uuid string or
# as an array of uuids; used by the affinity-style hints below.
_uuid_or_uuid_array = {
    'oneOf': [
        {
            'type': 'string',
            'format': 'uuid'
        },
        {
            'type': 'array',
            'items': parameter_types.uuid,
            'uniqueItems': True,
        }
    ]
}
create = {
    'type': 'object',
    'properties': {
        'OS-SCH-HNT:scheduler_hints': {
            'type': ['object', 'null'],
            'properties': {
                'local_to_instance': parameter_types.optional_uuid,
                # NOTE: schedule the new volume on a different host than
                # the listed volumes
                'different_host': _uuid_or_uuid_array,
                # NOTE: schedule the new volume on the same host as the
                # listed volumes
                'same_host': _uuid_or_uuid_array,
                'query': {
                    # NOTE: The value of 'query' is converted to dict data
                    # with jsonutils.loads() and used for filtering hosts.
                    'type': ['string', 'object'],
                },
            },
            # NOTE: As this Mail:
            # http://lists.openstack.org/pipermail/openstack-dev/2015-June/067996.html
            # pointed out the limit the scheduler-hints in the API is
            # problematic. So relax it.
            'additionalProperties': True
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/services.py 0000664 0000000 0000000 00000004501 15131732575 0024225 0 ustar 00root root 0000000 0000000 # Copyright 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from cinder.api.validation import parameter_types
# Body for enabling/disabling a service: at least one of 'binary' or
# 'service' must identify the service.
enable_and_disable = {
    'type': 'object',
    'anyOf': [
        {'required': ['binary']},
        {'required': ['service']}
    ],
    'additionalProperties': False,
    'properties': {
        'binary': {'type': 'string', 'minLength': 1, 'maxLength': 255},
        'host': parameter_types.cinder_host,
        'cluster': parameter_types.nullable_string,
        'service': {'type': 'string', 'minLength': 1, 'maxLength': 255},
    },
}
# Disabling with a reason accepts the same body plus a 'disabled_reason'
# string checked by the custom format checker.
disable_log_reason = copy.deepcopy(enable_and_disable)
disable_log_reason['properties']['disabled_reason'] = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
    'format': 'disabled_reason',
}
# Body for setting log levels on matching services.
set_log = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'binary': parameter_types.binary,
        'server': parameter_types.nullable_string,
        'prefix': parameter_types.nullable_string,
        'level': {'type': ['string', 'null'], 'format': 'level'},
    },
}
# Body for querying log levels: same selectors as set_log minus 'level'.
get_log = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'binary': parameter_types.binary,
        'server': parameter_types.nullable_string,
        'prefix': parameter_types.nullable_string,
    },
}
# Body for freezing/thawing a backend, addressed by cluster or host.
freeze_and_thaw = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'cluster': parameter_types.nullable_string,
        'host': parameter_types.cinder_host,
    },
}
# Body for failing a host over to a replication target.
failover_host = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'host': parameter_types.cinder_host,
        'backend_id': parameter_types.nullable_string,
        'cluster': parameter_types.nullable_string,
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/snapshot_actions.py 0000664 0000000 0000000 00000002156 15131732575 0025765 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 snapshot actions API.
"""
# Request body of os-update_snapshot_status: 'status' is mandatory.
update_snapshot_status = {
    'type': 'object',
    'required': ['os-update_snapshot_status'],
    'additionalProperties': False,
    'properties': {
        'os-update_snapshot_status': {
            'type': 'object',
            'required': ['status'],
            'additionalProperties': False,
            'properties': {
                'status': {'type': 'string'},
                # NOTE(review): 'progress' has only a custom format
                # checker and no 'type', so any JSON type reaches the
                # checker -- confirm that is intentional.
                'progress': {'format': 'progress'},
            },
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/snapshot_manage.py 0000664 0000000 0000000 00000003534 15131732575 0025556 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 snapshot_manage API.
"""
from cinder.api.validation import parameter_types
# Request body of snapshot manage: the backend reference and the owning
# volume id are both mandatory.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'snapshot': {
            'type': 'object',
            'properties': {
                'description': parameter_types.description,
                'metadata': parameter_types.metadata_allows_null,
                'name': parameter_types.name_allow_zero_min_length,
                'volume_id': parameter_types.uuid,
                'ref': {'type': ['object', 'null', 'string']},
            },
            'required': ['ref', 'volume_id'],
            'additionalProperties': False,
        },
    },
    'required': ['snapshot'],
    'additionalProperties': False,
}
# Request body for updating a single snapshot metadata item: exactly one
# key/value pair matching the metadata key pattern.
update = {
    'type': 'object',
    'required': ['meta'],
    'additionalProperties': True,
    'properties': {
        'meta': {
            'type': 'object',
            'minProperties': 1,
            'maxProperties': 1,
            'patternProperties': {
                '^[a-zA-Z0-9-_:. ]{1,255}$': {
                    'type': 'string',
                    'maxLength': 255,
                    'description': 'The snapshot metadata value.',
                },
            },
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/snapshot_metadata.py 0000664 0000000 0000000 00000002537 15131732575 0026110 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# Schema fragment for a snapshot metadata dict: keys are 1-255 chars of
# letters, digits, "-_:." and space; values are strings up to 255 chars.
_metadata_properties = {
    'type': 'object',
    'additionalProperties': False,
    'patternProperties': {
        '^[a-zA-Z0-9-_:. ]{1,255}$': {
            'type': 'string',
            'maxLength': 255,
            'description': 'The snapshot metadata value.',
        },
    },
}
# Variant restricted to exactly one key/value pair.
_metadata_property = copy.deepcopy(_metadata_properties)
_metadata_property['minProperties'] = 1
_metadata_property['maxProperties'] = 1
# Request body of snapshot metadata create.
# NOTE: removed a stray "'type': 'object'" entry that sat inside
# 'properties' and declared a property named "type" whose schema was the
# invalid bare string 'object' (each property schema must be an object).
create = {
    'type': 'object',
    'properties': {
        'metadata': _metadata_properties,
    },
    'required': ['metadata'],
    'additionalProperties': True,
}
# Updating a single metadata item; update_all replaces the whole set and
# therefore shares the creation schema.
update = {
    'type': 'object',
    'required': ['meta'],
    'additionalProperties': True,
    'properties': {
        'meta': _metadata_property,
    },
}
update_all = create
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/snapshot_unmanage.py 0000664 0000000 0000000 00000001375 15131732575 0026122 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO: Restrict the value to 'null' in a future API version
# The os-unmanage action takes no arguments, so any value is currently
# accepted for the action key itself.
unmanage = {
    'type': 'object',
    'required': ['os-unmanage'],
    'additionalProperties': False,
    'properties': {'os-unmanage': {}},
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/snapshots.py 0000664 0000000 0000000 00000004301 15131732575 0024422 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Snapshots API.
"""
from cinder.api.validation import parameter_types
# Body schema for snapshot create.
# NOTE: the invalid "'type': 'object'" entry that used to live inside
# 'properties' has been removed.  It declared a property named "type"
# whose schema was the string 'object' (not a dict), so a request body
# containing a "type" key would have crashed the validator instead of
# returning a clean 400; with additionalProperties False such a body is
# now rejected as an unknown property.
create = {
    'type': 'object',
    'properties': {
        'snapshot': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'display_name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
                'volume_id': parameter_types.uuid_allow_null,
                'force': parameter_types.boolean,
                'metadata': parameter_types.metadata_allows_null,
            },
            'required': ['volume_id'],
            'additionalProperties': False,
        },
    },
    'required': ['snapshot'],
    'additionalProperties': False,
}
# Body schema for snapshot update.  At least one of the four updatable
# fields must be present (anyOf).
# NOTE: the invalid "'type': 'object'" entry inside 'properties' has been
# removed — it was a string where a subschema dict is required and would
# crash validation if a body carried a "type" key.
update = {
    'type': 'object',
    'properties': {
        'snapshot': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name_allow_zero_min_length,
                'description': parameter_types.description,
                'display_name': parameter_types.name_allow_zero_min_length,
                'display_description': parameter_types.description,
            },
            'additionalProperties': False,
            'anyOf': [
                {'required': ['name']},
                {'required': ['description']},
                {'required': ['display_name']},
                {'required': ['display_description']}
            ]
        },
    },
    'required': ['snapshot'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/types_extra_specs.py 0000664 0000000 0000000 00000002066 15131732575 0026152 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 types_extra_specs API.
"""
import copy
from cinder.api.validation import parameter_types
# Body schema for creating volume-type extra specs.
create = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['extra_specs'],
    'properties': {
        'extra_specs': parameter_types.extra_specs_with_no_spaces_key
    },
}
# An update request is a bare extra-specs mapping that must carry exactly
# one key/value pair.
update = copy.deepcopy(parameter_types.extra_specs_with_no_spaces_key)
update['minProperties'] = 1
update['maxProperties'] = 1
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_actions.py 0000664 0000000 0000000 00000015561 15131732575 0025441 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 volume_actions API.
"""
import copy
from cinder.api.validation import parameter_types
# Reuse the generic description constraints (nullable string with bounded
# length) for the Glance container_format value.
container_format = parameter_types.description
# Body schema for the os-attach action: 'mountpoint' is mandatory, and the
# caller must identify the attaching party by either an instance UUID or a
# bare host name (anyOf).
attach = {
    'type': 'object',
    'properties': {
        'os-attach': {
            'type': 'object',
            'properties': {
                'instance_uuid': parameter_types.uuid,
                'mountpoint': {
                    'type': 'string', 'minLength': 1,
                    'maxLength': 255
                },
                'host_name': {'type': 'string', 'maxLength': 255},
                # Read-write unless the caller explicitly asks for 'ro'.
                'mode': {'type': 'string', 'enum': ['rw', 'ro']}
            },
            'required': ['mountpoint'],
            'anyOf': [{'required': ['instance_uuid']},
                      {'required': ['host_name']}],
            'additionalProperties': False,
        },
    },
    'required': ['os-attach'],
    'additionalProperties': False,
}
# Body schema for os-detach; the whole action value may itself be null for
# backward compatibility with older clients.
detach = {
    'type': 'object',
    'properties': {
        'os-detach': {
            'type': ['object', 'null'],
            'properties': {
                # NOTE(mriedem): This allows null for backward compatibility.
                'attachment_id': parameter_types.uuid_allow_null,
            },
            'additionalProperties': False,
        },
    },
    'required': ['os-detach'],
    'additionalProperties': False,
}
def _empty_body_action(action):
    """Build a schema accepting ``{action: <anything>}`` and nothing else.

    The action's value is deliberately unconstrained for backward
    compatibility with clients that send null, {}, or other payloads.
    """
    return {
        'type': 'object',
        'properties': {action: {}},
        'required': [action],
        'additionalProperties': False,
    }


# TODO: Restrict the value to 'null' in a future API version
reserve = _empty_body_action('os-reserve')
# TODO: Restrict the value to 'null' in a future API version
unreserve = _empty_body_action('os-unreserve')
# TODO: Restrict the value to 'null' in a future API version
begin_detaching = _empty_body_action('os-begin_detaching')
# TODO: Restrict the value to 'null' in a future API version
roll_detaching = _empty_body_action('os-roll_detaching')
# Body schema for os-extend: grow the volume to 'new_size'.
extend = {
    'type': 'object',
    'properties': {
        'os-extend': {
            'type': 'object',
            'properties': {
                'new_size': parameter_types.volume_size,
            },
            'required': ['new_size'],
            'additionalProperties': False,
        },
    },
    'required': ['os-extend'],
    'additionalProperties': False,
}
# Body schema for os-retype: change the volume type, optionally migrating.
retype = {
    'type': 'object',
    'properties': {
        'os-retype': {
            'type': 'object',
            'properties': {
                'new_type': {'type': 'string'},
                # NOTE(review): 'null' is permitted by 'type' but absent
                # from 'enum', so an explicit null would still fail the
                # enum check — confirm whether that is intended.
                'migration_policy': {
                    'type': ['string', 'null'],
                    'enum': ['on-demand', 'never']},
            },
            'required': ['new_type'],
            'additionalProperties': False,
        },
    },
    'required': ['os-retype'],
    'additionalProperties': False,
}
# Body schema for os-set_bootable: flip the volume's bootable flag.
set_bootable = {
    'type': 'object',
    'properties': {
        'os-set_bootable': {
            'type': 'object',
            'properties': {
                'bootable': parameter_types.boolean
            },
            'required': ['bootable'],
            'additionalProperties': False,
        },
    },
    'required': ['os-set_bootable'],
    'additionalProperties': False,
}
# Body schema for os-volume_upload_image: copy a volume to a Glance image.
volume_upload_image = {
    'type': 'object',
    'properties': {
        'os-volume_upload_image': {
            'type': 'object',
            'properties': {
                'image_name': {
                    'type': 'string', 'minLength': 1, 'maxLength': 255
                },
                'force': parameter_types.boolean,
                'disk_format': {
                    'type': 'string',
                    'enum': ['raw', 'vmdk', 'vdi', 'qcow2',
                             'vhd', 'vhdx', 'ploop']
                },
                'container_format': container_format
            },
            'required': ['image_name'],
            'additionalProperties': False,
        },
    },
    'required': ['os-volume_upload_image'],
    'additionalProperties': False,
}
# Microversion 3.1 additionally accepts image 'visibility' and 'protected'.
volume_upload_image_v31 = copy.deepcopy(volume_upload_image)
volume_upload_image_v31['properties']['os-volume_upload_image']['properties'][
    'visibility'] = {'type': 'string',
                     'enum': ['community', 'public', 'private', 'shared']}
volume_upload_image_v31['properties']['os-volume_upload_image']['properties'][
    'protected'] = parameter_types.boolean
# os-initialize_connection: the connector description is mandatory and may
# be sent either as an object or as a pre-serialized string.
initialize_connection = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['os-initialize_connection'],
    'properties': {
        'os-initialize_connection': {
            'type': 'object',
            'additionalProperties': False,
            'required': ['connector'],
            'properties': {
                'connector': {'type': ['object', 'string']},
            },
        },
    },
}
# os-terminate_connection: same shape, but a null connector is tolerated.
terminate_connection = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['os-terminate_connection'],
    'properties': {
        'os-terminate_connection': {
            'type': 'object',
            'additionalProperties': False,
            'required': ['connector'],
            'properties': {
                'connector': {'type': ['string', 'object', 'null']},
            },
        },
    },
}
# Body schema for os-update_readonly_flag.
volume_readonly_update = {
    'type': 'object',
    'properties': {
        'os-update_readonly_flag': {
            'type': 'object',
            'properties': {
                'readonly': parameter_types.boolean
            },
            'required': ['readonly'],
            'additionalProperties': False,
        },
    },
    'required': ['os-update_readonly_flag'],
    'additionalProperties': False,
}
# Body schema for os-reimage: rewrite the volume from a Glance image.
reimage = {
    'type': 'object',
    'properties': {
        'os-reimage': {
            'type': 'object',
            'properties': {
                'image_id': parameter_types.uuid,
                'reimage_reserved': parameter_types.boolean,
            },
            'required': ['image_id'],
            'additionalProperties': False,
        },
    },
    'required': ['os-reimage'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_image_metadata.py 0000664 0000000 0000000 00000003505 15131732575 0026716 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 volume image metadata API.
"""
from cinder.api.validation import parameter_types
# TODO: Restrict the value to 'null' in a future API version
index = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['os-show_image_metadata'],
    'properties': {
        # Any value is currently accepted for backward compatibility.
        'os-show_image_metadata': {},
    },
}
# Body schema for os-set_image_metadata.
set_image_metadata = {
    'type': 'object',
    'properties': {
        'os-set_image_metadata': {
            'type': 'object',
            'properties': {
                'metadata': parameter_types.image_metadata,
            },
            'required': ['metadata'],
            'additionalProperties': False,
        },
    },
    'required': ['os-set_image_metadata'],
    'additionalProperties': False,
}
# Body schema for os-unset_image_metadata: removes a single key.
unset_image_metadata = {
    'type': 'object',
    'properties': {
        'os-unset_image_metadata': {
            'type': 'object',
            'properties': {
                'key': {'type': 'string',
                        'minLength': 1,
                        'maxLength': 255},
            },
            'required': ['key'],
            'additionalProperties': False,
        },
    },
    'required': ['os-unset_image_metadata'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_manage.py 0000664 0000000 0000000 00000003424 15131732575 0025224 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 volume manage API.
"""
import copy
from cinder.api.validation import parameter_types
# Body schema for managing an existing backend volume into Cinder; only
# the backend reference ('ref') is mandatory.
volume_manage_create = {
    'type': 'object',
    'properties': {
        'volume': {
            'type': 'object',
            'properties': {
                "description": parameter_types.description,
                "availability_zone": parameter_types.
                name_allow_zero_min_length,
                "bootable": parameter_types.boolean,
                "volume_type": parameter_types.name_allow_zero_min_length,
                "name": parameter_types.name_allow_zero_min_length,
                "host": parameter_types.cinder_host,
                # Driver-specific reference; shape is backend-dependent.
                "ref": {'type': ['object', 'string']},
                "metadata": parameter_types.metadata_allows_null,
            },
            'required': ['ref'],
            'additionalProperties': False,
        },
    },
    'required': ['volume'],
    'additionalProperties': False,
}
# Microversion 3.16 additionally accepts a (nullable) cluster name.
volume_manage_create_v316 = copy.deepcopy(volume_manage_create)
volume_manage_create_v316['properties']['volume']['properties'][
    'cluster'] = {'type': ['string', 'null'],
                  'minLength': 0, 'maxLength': 255}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_metadata.py 0000664 0000000 0000000 00000002430 15131732575 0025550 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Volume metadata API.
"""
import copy
from cinder.api.validation import parameter_types
# Extra-specs-shaped mapping restricted to exactly one key/value pair,
# used for single-item ('meta') updates.
metadata_restricted_properties = copy.deepcopy(parameter_types.extra_specs)
metadata_restricted_properties.update({
    'minProperties': 1,
    'maxProperties': 1
})
# Body schema for volume metadata create.
# NOTE: the invalid "'type': 'object'" entry that used to live inside
# 'properties' has been removed.  It declared a property named "type"
# whose schema was the *string* 'object' (not a valid subschema), so a
# request body containing a "type" key would have crashed the validator;
# with additionalProperties False such a body is now cleanly rejected.
create = {
    'type': 'object',
    'properties': {
        'metadata': parameter_types.extra_specs,
    },
    'required': ['metadata'],
    'additionalProperties': False,
}
# Body schema for single-item volume metadata update (same stray "type"
# entry removed here as well).
update = {
    'type': 'object',
    'properties': {
        'meta': metadata_restricted_properties,
    },
    'required': ['meta'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_transfer.py 0000664 0000000 0000000 00000004466 15131732575 0025627 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 volume transfer API.
"""
from cinder.api.validation import parameter_types
# Body schema for creating a volume transfer (pre-3.55).
create = {
    'type': 'object',
    'properties': {
        'transfer': {
            'type': 'object',
            'properties': {
                'volume_id': parameter_types.uuid,
                'name': {'oneOf': [{'type': 'string',
                                    'format':
                                        "name_skip_leading_trailing_spaces"},
                                   {'type': 'null'}]},
            },
            'required': ['volume_id'],
            'additionalProperties': False,
        },
    },
    'required': ['transfer'],
    'additionalProperties': False,
}
# Body schema for accepting a transfer.
accept = {
    'type': 'object',
    'properties': {
        'accept': {
            'type': 'object',
            'properties': {
                # Integers are tolerated for backward compatibility with
                # clients that send the key unquoted.
                'auth_key': {'type': ['string', 'integer']},
            },
            'required': ['auth_key'],
            'additionalProperties': False,
        },
    },
    'required': ['accept'],
    'additionalProperties': False,
}
# Microversion 3.55 variant of create: adds the 'no_snapshots' flag.
create_v355 = {
    'type': 'object',
    'properties': {
        'transfer': {
            'type': 'object',
            'properties': {
                'volume_id': parameter_types.uuid,
                'name': {'oneOf': [{'type': 'string',
                                    'format':
                                        "name_skip_leading_trailing_spaces"},
                                   {'type': 'null'}]},
                'no_snapshots': parameter_types.boolean
            },
            'required': ['volume_id'],
            'additionalProperties': False,
        },
    },
    'required': ['transfer'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_type_access.py 0000664 0000000 0000000 00000003147 15131732575 0026300 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 volume type access API.
"""
# Body schema for addProjectAccess.
# NOTE: the invalid "'type': 'object'" entry that used to live inside
# 'properties' has been removed.  It declared a property named "type"
# whose schema was the *string* 'object' — not a valid JSON Schema
# subschema — so a body containing a "type" key would have crashed the
# validator; with additionalProperties False it is now cleanly rejected.
add_project_access = {
    'type': 'object',
    'properties': {
        'addProjectAccess': {
            'type': 'object',
            'properties': {
                'project': {'type': 'string',
                            'minLength': 1, 'maxLength': 255},
            },
            'required': ['project'],
            'additionalProperties': False,
        },
    },
    'required': ['addProjectAccess'],
    'additionalProperties': False,
}
# Body schema for removeProjectAccess (same stray "type" entry removed).
remove_project_access = {
    'type': 'object',
    'properties': {
        'removeProjectAccess': {
            'type': 'object',
            'properties': {
                'project': {'type': 'string',
                            'minLength': 1, 'maxLength': 255},
            },
            'required': ['project'],
            'additionalProperties': False,
        },
    },
    'required': ['removeProjectAccess'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_type_encryption.py 0000664 0000000 0000000 00000002756 15131732575 0027236 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Schema for V3 volume type encryption API."""
import copy
from cinder.api.validation import parameter_types
# Body schema for creating volume-type encryption settings.
create = {
    'type': 'object',
    'properties': {
        'encryption': {
            'type': 'object',
            'properties': {
                'key_size': parameter_types.key_size,
                'provider': {'type': 'string', 'minLength': 0,
                             'maxLength': 255},
                'control_location': {'enum': ['front-end', 'back-end']},
                'cipher': {'type': ['string', 'null'],
                           'minLength': 0, 'maxLength': 255},
            },
            'required': ['provider', 'control_location'],
            # NOTE(review): unlike most schemas in this package, extra keys
            # are tolerated here — presumably for backward compatibility;
            # confirm before tightening.
            'additionalProperties': True,
        },
    },
    'required': ['encryption'],
    'additionalProperties': False,
}
# Update reuses the create shape but with no mandatory fields (an empty
# 'required' list validates trivially).
update = copy.deepcopy(create)
update['properties']['encryption']['required'] = []
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_types.py 0000664 0000000 0000000 00000003320 15131732575 0025133 0 ustar 00root root 0000000 0000000 # Copyright 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cinder.api.validation import parameter_types
# Body schema for volume-type create.
# NOTE: the invalid "'type': 'object'" entry that used to sit inside
# 'properties' has been removed — it was the *string* 'object' where a
# subschema dict is required, and would crash validation if a request
# body contained a "type" key; with additionalProperties False such a
# body is now rejected with a clean 400.
create = {
    'type': 'object',
    'properties': {
        'volume_type': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'description': parameter_types.description,
                'extra_specs': parameter_types.extra_specs_with_null,
                'os-volume-type-access:is_public': parameter_types.boolean,
            },
            'required': ['name'],
            'additionalProperties': False,
        },
    },
    'required': ['volume_type'],
    'additionalProperties': False,
}
# Body schema for volume-type update (same stray "type" entry removed).
update = {
    'type': 'object',
    'properties': {
        'volume_type': {
            'type': 'object',
            'properties': {
                'name': parameter_types.update_name,
                'description': parameter_types.description,
                'is_public': parameter_types.boolean,
            },
            'additionalProperties': False,
        },
    },
    'required': ['volume_type'],
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volume_unmanage.py 0000664 0000000 0000000 00000001375 15131732575 0025572 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO: Restrict the value to 'null' in a future API version
unmanage = {
    'type': 'object',
    'required': ['os-unmanage'],
    'properties': {
        # Accept any payload for the action key (backward compatibility).
        'os-unmanage': {},
    },
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/volumes.py 0000664 0000000 0000000 00000012336 15131732575 0024101 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Volumes API.
"""
import copy
from cinder.api.validation import parameter_types
# Body schema for volume create (base shape used by pre-3.13 requests).
# NOTE: additionalProperties stays True here; only microversion 3.53
# starts rejecting unknown keys (see create_volume_v353 below).
create = {
    'type': 'object',
    'properties': {
        'volume': {
            'type': 'object',
            'properties': {
                'name': {'type': ['string', 'null'],
                         'format': 'name_non_mandatory_remove_white_spaces'},
                'description': {
                    'type': ['string', 'null'],
                    'format': 'description_non_mandatory_remove_white_spaces'},
                'display_name': {
                    'type': ['string', 'null'],
                    'format': 'name_non_mandatory_remove_white_spaces'},
                'display_description': {
                    'type': ['string', 'null'],
                    'format':
                        'description_non_mandatory_remove_white_spaces'},
                # volume_type accepts 'id' as well as 'name' so do lazy schema
                # validation for it.
                'volume_type': parameter_types.name_allow_zero_min_length,
                'metadata': parameter_types.metadata_allows_null,
                'snapshot_id': parameter_types.optional_uuid,
                'source_volid': parameter_types.optional_uuid,
                'consistencygroup_id': parameter_types.optional_uuid,
                'size': parameter_types.volume_size_allows_null,
                'availability_zone': parameter_types.availability_zone,
                # The functionality to create a multiattach volume by the
                # multiattach parameter is removed.
                # We accept the parameter but raise a BadRequest stating the
                # "new way" of creating multiattach volumes i.e. with a
                # multiattach volume type so users using the "old way"
                # have ease of moving into the new functionality.
                'multiattach': parameter_types.optional_boolean,
                'image_id': {'type': ['string', 'null'], 'minLength': 0,
                             'maxLength': 255},
                'imageRef': {'type': ['string', 'null'], 'minLength': 0,
                             'maxLength': 255},
            },
            'additionalProperties': True,
        },
        'OS-SCH-HNT:scheduler_hints': {
            'type': ['object', 'null']
        },
    },
    'required': ['volume'],
    'additionalProperties': False,
}
# 3.13 adds group_id; 3.47 adds backup_id; 3.53 rejects unknown keys.
create_volume_v313 = copy.deepcopy(create)
create_volume_v313['properties']['volume']['properties'][
    'group_id'] = {'type': ['string', 'null'], 'minLength': 0,
                   'maxLength': 255}
create_volume_v347 = copy.deepcopy(create_volume_v313)
create_volume_v347['properties']['volume']['properties'][
    'backup_id'] = parameter_types.optional_uuid
create_volume_v353 = copy.deepcopy(create_volume_v347)
create_volume_v353['properties']['volume']['additionalProperties'] = False
# Body schema for volume update.
update = {
    'type': 'object',
    'properties': {
        'volume': {
            'type': 'object',
            'properties': {
                # The 'name' and 'description' are required to be compatible
                # with v2.
                'name': {
                    'type': ['string', 'null'],
                    'format': 'name_non_mandatory_remove_white_spaces'},
                'description': {
                    'type': ['string', 'null'],
                    'format':
                        'description_non_mandatory_remove_white_spaces'},
                'display_name': {
                    'type': ['string', 'null'],
                    'format': 'name_non_mandatory_remove_white_spaces'},
                'display_description': {
                    'type': ['string', 'null'],
                    'format':
                        'description_non_mandatory_remove_white_spaces'},
                'metadata': parameter_types.extra_specs,
            },
            'additionalProperties': False,
        },
    },
    'required': ['volume'],
    'additionalProperties': False,
}
# From 3.53 the update body must actually contain at least one of the
# updatable fields.
update_v353 = copy.deepcopy(update)
update_v353['properties']['volume']['anyOf'] = [
    {'required': ['name']},
    {'required': ['description']},
    {'required': ['display_name']},
    {'required': ['display_description']},
    {'required': ['metadata']}]
# TODO: Restrict additional properties in a future API version
revert = {
    'type': 'object',
    'additionalProperties': False,
    'required': ['revert'],
    'properties': {
        'revert': {
            'type': 'object',
            'properties': {
                'snapshot_id': {
                    'type': ['string', 'null']
                },
            },
            # Extra keys are tolerated for now; see TODO above.
            'additionalProperties': True,
        },
    },
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/schemas/workers.py 0000664 0000000 0000000 00000002304 15131732575 0024075 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Workers API.
"""
from cinder.api.validation import parameter_types
# Body schema for the workers cleanup command; every filter is optional.
cleanup = {
    'type': 'object',
    'properties': {
        'cluster_name': parameter_types.hostname,
        'disabled': parameter_types.boolean,
        'host': parameter_types.hostname,
        'is_up': parameter_types.boolean,
        # Cleanup is only supported for these two services.
        'binary': {'enum': ['cinder-volume', 'cinder-scheduler']},
        'resource_id': parameter_types.optional_uuid,
        'resource_type': parameter_types.resource_type,
        'service_id': parameter_types.service_id,
    },
    'additionalProperties': False,
}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/urlmap.py 0000664 0000000 0000000 00000024004 15131732575 0022257 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from urllib.request import parse_http_list
import paste.urlmap
from cinder.api.openstack import wsgi
# An RFC-style quoted-string, including backslash-escaped characters.
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
# One ";key[=value]" option piece of a Content-Type-like header; both the
# key and the value may themselves be quoted-strings.
_option_header_piece_re = re.compile(
    r';\s*([^\s;=]+|%s)\s*'
    r'(?:=\s*([^;]+|%s))?\s*' %
    (_quoted_string_re, _quoted_string_re))
def unquote_header_value(value):
    """Strip one pair of surrounding double quotes from a header value.

    This mirrors what browsers actually do rather than full RFC
    unquoting: backslash escapes are left untouched, because IE (among
    others) uploads files with raw Windows paths such as
    ``"C:\\foo\\bar.txt"`` as the filename.

    :param value: the header value to unquote.
    """
    if value and value.startswith('"') and value.endswith('"'):
        return value[1:-1]
    return value
def parse_list_header(value):
    """Parse a comma-separated header list (RFC 2068 Section 2).

    Elements of the list may be quoted-strings, and a quoted-string may
    contain a comma; quotes are removed from fully quoted elements after
    splitting.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for element in parse_http_list(value):
        if element[:1] == element[-1:] == '"':
            element = element[1:-1]
            # A doubly quoted element loses one more pair of quotes
            # (browser-style unquoting, no escape handling).
            if element and element[0] == element[-1] == '"':
                element = element[1:-1]
        items.append(element)
    return items
def parse_options_header(value):
    """Parse 'Content-Type'-like header into a tuple.

    Splits a ``Content-Type``-like header into the leading token and a
    dict of its ``;``-separated options:

    >>> parse_options_header('Content-Type: text/html; mimetype=text/html')
    ('Content-Type:', {'mimetype': 'text/html'})

    :param value: the header to parse.
    :return: (str, options)
    """
    if not value:
        return '', {}

    def _strip_quotes(text):
        # Browser-style unquoting: drop one surrounding pair of quotes.
        if text and text[0] == text[-1] == '"':
            return text[1:-1]
        return text

    quoted = r'"[^"\\]*(?:\\.[^"\\]*)*"'
    piece_re = re.compile(
        r';\s*([^\s;=]+|%s)\s*'
        r'(?:=\s*([^;]+|%s))?\s*' % (quoted, quoted))

    def _pairs():
        for match in piece_re.finditer(';' + value):
            key, val = match.groups()
            yield (_strip_quotes(key),
                   None if val is None else _strip_quotes(val))

    parts = _pairs()
    # The first token is the content type itself; the rest are options.
    name = next(parts)[0]
    return name, dict(parts)
class Accept(object):
    """Parsed representation of an HTTP ``Accept`` header."""

    def __init__(self, value):
        # One (content_type, params) tuple per comma-separated entry.
        self._content_types = [parse_options_header(v) for v in
                               parse_list_header(value)]

    def best_match(self, supported_content_types):
        """Pick the supported content type the header prefers.

        Returns ``(content_type, params)``; the content type is ``None``
        when nothing matches.  Higher ``q`` wins; on a quality tie the
        more specific mask (fewer ``*`` wildcards) wins.
        """
        # FIXME: Should we have a more sophisticated matching algorithm that
        # takes into account the version as well?
        best_quality = -1
        best_content_type = None
        best_params = {}
        best_match = '*/*'
        for content_type in supported_content_types:
            for content_mask, params in self._content_types:
                try:
                    # Malformed q-values are skipped rather than rejected.
                    quality = float(params.get('q', 1))
                except ValueError:
                    continue
                if quality < best_quality:
                    continue
                elif best_quality == quality:
                    # Equal quality: only replace the current winner if the
                    # new mask is strictly more specific.
                    if best_match.count('*') <= content_mask.count('*'):
                        continue
                if self._match_mask(content_mask, content_type):
                    best_quality = quality
                    best_content_type = content_type
                    best_params = params
                    best_match = content_mask
        return best_content_type, best_params

    def content_type_params(self, best_content_type):
        """Find parameters in Accept header for given content type."""
        for content_type, params in self._content_types:
            if best_content_type == content_type:
                return params
        return {}

    def _match_mask(self, mask, content_type):
        # Exact match, full wildcard, or major-type wildcard ('text/*').
        if '*' not in mask:
            return content_type == mask
        if mask == '*/*':
            return True
        mask_major = mask[:-2]
        content_type_major = content_type.split('/', 1)[0]
        return content_type_major == mask_major
def urlmap_factory(loader, global_conf, **local_conf):
    """Paste app factory building a :class:`URLMap` from path->app config.

    A ``not_found_app`` may be named in either the local or global config;
    it is loaded and used when no mapped path matches a request.
    """
    if 'not_found_app' in local_conf:
        not_found_app = local_conf.pop('not_found_app')
    else:
        not_found_app = global_conf.get('not_found_app')
    if not_found_app:
        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
    urlmap = URLMap(not_found_app=not_found_app)
    # Every remaining local_conf item maps a path expression to an app name.
    for path, app_name in local_conf.items():
        path = paste.urlmap.parse_path_expression(path)
        app = loader.get_app(app_name, global_conf=global_conf)
        urlmap[path] = app
    return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return mime_type, app
    def __call__(self, environ, start_response):
        """Dispatch the request to the best-matching application.

        Resolves host/port, normalizes PATH_INFO, then tries the path,
        Content-Type and Accept strategies in order; falls back to the
        default (unversioned) mapping, and finally to the not-found app.
        """
        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            # No explicit port in the Host header: infer it from scheme.
            if environ['wsgi.url_scheme'] == 'http':
                port = '80'
            else:
                port = '443'

        path_info = environ['PATH_INFO']
        path_info = self.normalize_url(path_info, False)[1]

        # The MIME type for the response is determined in one of two ways:
        # 1) URL path suffix (eg /servers/detail.json)
        # 2) Accept header (eg application/json;q=0.8)

        # The API version is determined in one of three ways:
        # 1) URL path prefix (eg /v1.1/tenant/servers/detail)
        # 2) Content-Type header (eg application/json;version=1.1)
        # 3) Accept header (eg application/json;q=0.8;version=1.1)

        supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)

        mime_type, app, app_url = self._path_strategy(host, port, path_info)

        if not app:
            app = self._content_type_strategy(host, port, environ)

        # The Accept strategy may still contribute whichever of the two
        # values (MIME type / app) is missing at this point.
        if not mime_type or not app:
            possible_mime_type, possible_app = self._accept_strategy(
                host, port, environ, supported_content_types)
            if possible_mime_type and not mime_type:
                mime_type = possible_mime_type
            if possible_app and not app:
                app = possible_app

        if not mime_type:
            mime_type = 'application/json'

        if not app:
            # Didn't match a particular version, probably matches default
            app, app_url = self._match(host, port, path_info)
            if app:
                app = self._munge_path(app, path_info, app_url)

        if app:
            environ['cinder.best_content_type'] = mime_type
            return app(environ, start_response)

        environ['paste.urlmap_object'] = self
        return self.not_found_application(environ, start_response)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v2/ 0000775 0000000 0000000 00000000000 15131732575 0020734 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v2/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0023033 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/ 0000775 0000000 0000000 00000000000 15131732575 0020735 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0023034 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/attachments.py 0000664 0000000 0000000 00000030600 15131732575 0023621 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from http import HTTPStatus
from oslo_log import log as logging
import webob
from cinder.api import api_utils
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import attachments as schema
from cinder.api.v3.views import attachments as attachment_views
from cinder.api import validation
from cinder import context as cinder_context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.policies import attachments as attachment_policy
from cinder.volume import api as volume_api
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
    """The Attachments API controller for the OpenStack API."""

    _view_builder_class = attachment_views.ViewBuilder

    # Filter keys accepted by the list endpoints; everything else is
    # removed by api_utils.remove_invalid_filter_options().
    allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}

    def __init__(self, ext_mgr=None):
        """Initialize controller class."""
        self.volume_api = volume_api.API()
        self.ext_mgr = ext_mgr
        super(AttachmentsController, self).__init__()
    @wsgi.Controller.api_version(mv.NEW_ATTACH)
    def show(self, req, id):
        """Return data about the given attachment.

        :param req: the API request
        :param id: UUID of the attachment
        :returns: a detailed view of the attachment
        """
        context = req.environ['cinder.context']
        attachment = objects.VolumeAttachment.get_by_id(context, id)
        # The volume is loaded with an admin context so admin_metadata
        # (where the disk format is recorded) is readable.
        volume = objects.Volume.get_by_id(cinder_context.get_admin_context(),
                                          attachment.volume_id)
        if volume.admin_metadata and 'format' in volume.admin_metadata:
            # Propagate the volume's disk format into the connection info
            # returned to the caller.
            attachment.connection_info['format'] = (
                volume.admin_metadata['format'])
        return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
    @common.process_general_filtering('attachment')
    def _process_attachment_filtering(self, context=None, filters=None,
                                      req_version=None):
        """Drop filter options that are not in ``allowed_filters``.

        The decorator performs the general, microversion-aware filter
        processing; this body only strips invalid remaining options.
        """
        api_utils.remove_invalid_filter_options(context, filters,
                                                self.allowed_filters)
    def _items(self, req):
        """Return a list of attachments, transformed through view builder."""
        context = req.environ['cinder.context']
        req_version = req.api_version_request

        # Pop out non search_opts and create local variables
        search_opts = req.GET.copy()
        sort_keys, sort_dirs = common.get_sort_params(search_opts)
        marker, limit, offset = common.get_pagination_params(search_opts)

        self._process_attachment_filtering(context=context,
                                           filters=search_opts,
                                           req_version=req_version)

        # The public filter key is 'instance_id', but the underlying
        # field is 'instance_uuid'; rename before querying.
        if search_opts.get('instance_id', None):
            search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
        if context.is_admin and 'all_tenants' in search_opts:
            # 'all_tenants' is a flag, not a real filter: remove it and
            # list attachments across all projects (admin only).
            del search_opts['all_tenants']
            return objects.VolumeAttachmentList.get_all(
                context, search_opts=search_opts, marker=marker, limit=limit,
                offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
        else:
            return objects.VolumeAttachmentList.get_all_by_project(
                context, context.project_id, search_opts=search_opts,
                marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
                sort_direction=sort_dirs)
    @wsgi.Controller.api_version(mv.NEW_ATTACH)
    @wsgi.response(HTTPStatus.OK)
    @validation.schema(schema.create, mv.NEW_ATTACH,
                       mv.get_prior_version(mv.ATTACHMENT_CREATE_MODE_ARG))
    @validation.schema(schema.create_v354, mv.ATTACHMENT_CREATE_MODE_ARG)
    def create(self, req, body):
        """Create an attachment.

        This method can be used to create an empty attachment (reserve) or
        to create and initialize a volume attachment based on the provided
        input parameters.

        If the caller does not yet have the connector information but needs
        to reserve an attachment for the volume (ie Nova BootFromVolume)
        the create can be called with just the volume-uuid and the server
        identifier. This will reserve an attachment, mark the volume as
        reserved and prevent any new attachment_create calls from being
        made until the attachment is updated (completed).

        The alternative is that the connection can be reserved and
        initialized all at once with a single call if the caller has all of
        the required information (connector data) at the time of the call.

        NOTE: In Nova terms server == instance, the server_id parameter
        referenced below is the UUID of the Instance, for non-nova
        consumers this can be a server UUID or some other arbitrary unique
        identifier.

        Starting from microversion 3.54, we can pass the attach mode as
        argument in the request body.

        Expected format of the input parameter 'body':

        .. code-block:: json

            {
                "attachment":
                {
                    "volume_uuid": "volume-uuid",
                    "instance_uuid": "null|nova-server-uuid",
                    "connector": "null|",
                    "mode": "null|rw|ro"
                }
            }

        Example connector:

        .. code-block:: json

            {
                "connector":
                {
                    "initiator": "iqn.1993-08.org.debian:01:cad181614cec",
                    "ip": "192.168.1.20",
                    "platform": "x86_64",
                    "host": "tempest-1",
                    "os_type": "linux2",
                    "multipath": false,
                    "mountpoint": "/dev/vdb",
                    "mode": "null|rw|ro"
                }
            }

        NOTE all that's required for a reserve is volume_uuid
        and an instance_uuid.

        returns: A summary view of the attachment object
        """
        context = req.environ['cinder.context']
        instance_uuid = body['attachment'].get('instance_uuid')
        volume_uuid = body['attachment']['volume_uuid']

        volume_ref = objects.Volume.get_by_id(
            context,
            volume_uuid)
        args = {'connector': body['attachment'].get('connector', None)}
        if req.api_version_request.matches(mv.ATTACHMENT_CREATE_MODE_ARG):
            # We check for attach_mode here and default to `null`
            # if nothing's provided. This seems odd to not just
            # set `rw`, BUT we want to keep compatability with
            # setting the mode via the connector for now, so we
            # use `null` as an identifier to distinguish that case
            args['attach_mode'] = body['attachment'].get('mode', 'null')
        err_msg = None
        try:
            attachment_ref = (
                self.volume_api.attachment_create(context,
                                                  volume_ref,
                                                  instance_uuid,
                                                  **args))
        except (exception.NotAuthorized,
                exception.InvalidVolume):
            # Let the wsgi layer translate these to proper HTTP errors.
            raise
        except exception.CinderException as ex:
            err_msg = _(
                "Unable to create attachment for volume (%s).") % ex.msg
            LOG.exception(err_msg)
        except Exception:
            err_msg = _("Unable to create attachment for volume.")
            LOG.exception(err_msg)
        finally:
            # Any swallowed failure above becomes a 500 with the logged
            # message; err_msg is None on the success/re-raise paths.
            if err_msg:
                raise webob.exc.HTTPInternalServerError(explanation=err_msg)
        return attachment_views.ViewBuilder.detail(attachment_ref)
    @wsgi.Controller.api_version(mv.NEW_ATTACH)
    @validation.schema(schema.update)
    def update(self, req, id, body):
        """Update an attachment record.

        Update a reserved attachment record with connector information
        and set up the appropriate connection_info from the driver.

        Expected format of the input parameter 'body':

        .. code:: json

            {
                "attachment":
                {
                    "connector":
                    {
                        "initiator": "iqn.1993-08.org.debian:01:cad181614cec",
                        "ip": "192.168.1.20",
                        "platform": "x86_64",
                        "host": "tempest-1",
                        "os_type": "linux2",
                        "multipath": false,
                        "mountpoint": "/dev/vdb",
                        "mode": "None|rw|ro"
                    }
                }
            }
        """
        context = req.environ['cinder.context']
        attachment_ref = (
            objects.VolumeAttachment.get_by_id(context, id))
        connector = body['attachment']['connector']
        err_msg = None
        try:
            attachment_ref = (
                self.volume_api.attachment_update(context,
                                                  attachment_ref,
                                                  connector))
        except (exception.NotAuthorized, exception.Invalid):
            # Let the wsgi layer translate these to proper HTTP errors.
            raise
        except exception.CinderException as ex:
            err_msg = (
                _("Unable to update attachment (%s).") % ex.msg)
            LOG.exception(err_msg)
        except Exception:
            err_msg = _("Unable to update the attachment.")
            LOG.exception(err_msg)
        finally:
            # Any swallowed failure above becomes a 500 with the logged
            # message; err_msg is None on the success/re-raise paths.
            if err_msg:
                raise webob.exc.HTTPInternalServerError(explanation=err_msg)
        # TODO(jdg): Test this out some more, do we want to return and object
        # or a dict?
        return attachment_views.ViewBuilder.detail(attachment_ref)
    @wsgi.Controller.api_version(mv.NEW_ATTACH)
    def delete(self, req, id):
        """Delete an attachment.

        Disconnects/Deletes the specified attachment, returns a list of
        any known shared attachment-id's for the effected backend device.

        returns: A summary list of any attachments sharing this connection
        """
        context = req.environ['cinder.context']
        attachment = objects.VolumeAttachment.get_by_id(context, id)
        attachments = self.volume_api.attachment_delete(context, attachment)
        return attachment_views.ViewBuilder.list(attachments)
    @wsgi.response(HTTPStatus.NO_CONTENT)
    @wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
    @wsgi.action('os-complete')
    @validation.schema(schema.complete)
    def complete(self, req, id, body):
        """Mark a volume attachment process as completed (in-use).

        Sets the attachment to ATTACHED and the volume to 'in-use', then
        emits the 'attach.end' usage notification. Returns no body (204).
        """
        context = req.environ['cinder.context']
        attachment_ref = (
            objects.VolumeAttachment.get_by_id(context, id))
        volume_ref = objects.Volume.get_by_id(
            context,
            attachment_ref.volume_id)
        context.authorize(attachment_policy.COMPLETE_POLICY,
                          target_obj=attachment_ref)
        # Flip the attachment first, then the volume, to the attached state.
        attachment_ref.update(
            {'attach_status': fields.VolumeAttachStatus.ATTACHED})
        attachment_ref.save()
        volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
        volume_ref.save()
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "attach.end")
def create_resource(ext_mgr):
    """Create the wsgi resource for this controller."""
    controller = AttachmentsController(ext_mgr)
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/backups.py 0000664 0000000 0000000 00000010704 15131732575 0022741 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The backups V3 API."""
from oslo_log import log as logging
from cinder.api.contrib import backups
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import backups as schema
from cinder.api.v3.views import backups as backup_views
from cinder.api import validation
from cinder.policies import backups as policy
LOG = logging.getLogger(__name__)
class BackupsController(backups.BackupsController):
    """The backups API controller for the OpenStack API V3."""

    # V3 uses its own view builder on top of the contrib controller.
    _view_builder_class = backup_views.ViewBuilder
    @wsgi.Controller.api_version(mv.BACKUP_UPDATE)
    @validation.schema(schema.update, mv.BACKUP_UPDATE,
                       mv.get_prior_version(mv.BACKUP_METADATA))
    @validation.schema(schema.update_backup_v343, mv.BACKUP_METADATA)
    def update(self, req, id, body):
        """Update a backup.

        Maps the public 'name'/'description' keys to the internal
        'display_name'/'display_description' fields; 'metadata' is only
        honoured from the BACKUP_METADATA microversion onwards.
        """
        context = req.environ['cinder.context']
        req_version = req.api_version_request

        backup_update = body['backup']
        self.clean_name_and_description(backup_update)

        update_dict = {}
        if 'name' in backup_update:
            update_dict['display_name'] = backup_update.pop('name')
        if 'description' in backup_update:
            update_dict['display_description'] = (
                backup_update.pop('description'))
        if (req_version.matches(
                mv.BACKUP_METADATA) and 'metadata' in backup_update):
            update_dict['metadata'] = backup_update.pop('metadata')

        new_backup = self.backup_api.update(context, id, update_dict)

        return self._view_builder.summary(req, new_backup)
def _add_backup_project_attribute(self, req, backup):
db_backup = req.get_db_backup(backup['id'])
key = "os-backup-project-attr:project_id"
backup[key] = db_backup['project_id']
def _add_backup_user_attribute(self, req, backup):
db_backup = req.get_db_backup(backup['id'])
key = "user_id"
backup[key] = db_backup['user_id']
    def show(self, req, id):
        """Return data about the given backup."""
        LOG.debug('Show backup with id %s.', id)
        context = req.environ['cinder.context']
        req_version = req.api_version_request

        # Not found exception will be handled at the wsgi level
        backup = self.backup_api.get(context, backup_id=id)
        req.cache_db_backup(backup)

        resp_backup = self._view_builder.detail(req, backup)
        # Newer microversions expose extra project/user attributes when
        # the policy allows it (fatal=False -> just skip when it doesn't).
        if req_version.matches(mv.BACKUP_PROJECT):
            if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False):
                self._add_backup_project_attribute(req, resp_backup['backup'])
        if req_version.matches(mv.BACKUP_PROJECT_USER_ID):
            if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False):
                self._add_backup_user_attribute(req, resp_backup['backup'])
        return resp_backup
    def detail(self, req):
        """Return a detailed list of backups.

        Extends the base (contrib) detail response with project/user
        attributes on newer microversions, policy permitting.
        """
        resp_backup = super(BackupsController, self).detail(req)
        context = req.environ['cinder.context']
        req_version = req.api_version_request

        if req_version.matches(mv.BACKUP_PROJECT):
            if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False):
                for bak in resp_backup['backups']:
                    self._add_backup_project_attribute(req, bak)
        if req_version.matches(mv.BACKUP_PROJECT_USER_ID):
            if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False):
                for bak in resp_backup['backups']:
                    self._add_backup_user_attribute(req, bak)
        return resp_backup
def _convert_sort_name(self, req_version, sort_keys):
if req_version.matches(mv.BACKUP_SORT_NAME) and 'name' in sort_keys:
sort_keys[sort_keys.index('name')] = 'display_name'
def create_resource():
    """Create the wsgi resource for the V3 backups controller."""
    controller = BackupsController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/clusters.py 0000664 0000000 0000000 00000014507 15131732575 0023162 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import clusters as schema
from cinder.api.v3.views import clusters as clusters_view
from cinder.api import validation
from cinder.common import constants
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import clusters as policy
from cinder import utils
class ClusterController(wsgi.Controller):
    """Controller for the clusters API endpoint."""

    # Query-string keys accepted by index/detail. NOTE: the original
    # literal listed 'binary' twice; it is listed once here — sets
    # de-duplicate, so behavior is unchanged.
    allowed_list_keys = {'name', 'binary', 'is_up', 'disabled', 'num_hosts',
                         'num_down_hosts', 'replication_status',
                         'frozen', 'active_backend_id'}

    # Keys only exposed from the REPLICATION_CLUSTER microversion on.
    replication_fields = {'replication_status', 'frozen', 'active_backend_id'}
    @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT)
    def show(self, req, id, binary=constants.VOLUME_BINARY):
        """Return data for a given cluster name with optional binary.

        :param id: the cluster name (clusters are addressed by name)
        :param binary: service binary to disambiguate same-named clusters
        """
        # Let the wsgi middleware convert NotAuthorized exceptions
        context = req.environ['cinder.context']
        context.authorize(policy.GET_POLICY)
        # Let the wsgi middleware convert NotFound exceptions
        cluster = objects.Cluster.get_by_id(context, None, binary=binary,
                                            name=id, services_summary=True)
        replication_data = req.api_version_request.matches(
            mv.REPLICATION_CLUSTER)
        return clusters_view.ViewBuilder.detail(cluster, replication_data)
    @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT)
    def index(self, req):
        """Return a non detailed list of all existing clusters.

        Filter by is_up, disabled, num_hosts, and num_down_hosts.
        """
        return self._get_clusters(req, detail=False)
    @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT)
    def detail(self, req):
        """Return a detailed list of all existing clusters.

        Filter by is_up, disabled, num_hosts, and num_down_hosts.
        """
        return self._get_clusters(req, detail=True)
    def _get_clusters(self, req, detail):
        """List clusters, validating and normalizing query-string filters.

        :param detail: when True the services summary is included and the
            detailed view is returned.
        """
        # Let the wsgi middleware convert NotAuthorized exceptions
        context = req.environ['cinder.context']
        context.authorize(policy.GET_ALL_POLICY)
        replication_data = req.api_version_request.matches(
            mv.REPLICATION_CLUSTER)
        filters = dict(req.GET)
        allowed = self.allowed_list_keys
        # Replication-related filters are only valid on new microversions.
        if not replication_data:
            allowed = allowed.difference(self.replication_fields)

        # Check filters are valid
        if not allowed.issuperset(filters):
            invalid_keys = set(filters).difference(allowed)
            msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys)
            raise exception.InvalidInput(reason=msg)

        # Check boolean values
        for bool_key in ('disabled', 'is_up'):
            if bool_key in filters:
                filters[bool_key] = utils.get_bool_param(bool_key, req.GET)

        # For detailed view we need the services summary information
        filters['services_summary'] = detail

        clusters = objects.ClusterList.get_all(context, **filters)
        return clusters_view.ViewBuilder.list(clusters, detail,
                                              replication_data)
    def _update(
        self, context, name, binary, disabled, disabled_reason=None,
        show_replication_data=False,
    ):
        """Enable/Disable scheduling for a cluster.

        :param disabled: True to disable, False to enable scheduling.
        :param disabled_reason: free-form reason persisted with the state.
        :returns: summary view of the cluster plus the disabled reason.
        """
        # NOTE(geguileo): This method tries to be consistent with services
        # update endpoint API.

        # Let wsgi handle NotFound exception
        cluster = objects.Cluster.get_by_id(context, None, binary=binary,
                                            name=name)
        cluster.disabled = disabled
        cluster.disabled_reason = disabled_reason
        cluster.save()

        # We return summary data plus the disabled reason
        ret_val = clusters_view.ViewBuilder.summary(
            cluster, show_replication_data,
        )
        ret_val['cluster']['disabled_reason'] = disabled_reason

        return ret_val
    @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT)
    @validation.schema(schema.disable_cluster)
    def disable(self, req, body):
        """Disable scheduling for the named cluster."""
        # NOTE(geguileo): This method tries to be consistent with services
        # disable endpoint API.

        # Let the wsgi middleware convert NotAuthorized exceptions
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        name = body['name']
        binary = body.get('binary', constants.VOLUME_BINARY)
        disabled_reason = body.get('disabled_reason')
        if disabled_reason:
            disabled_reason = disabled_reason.strip()
        show_replication_data = req.api_version_request.matches(
            mv.REPLICATION_CLUSTER)
        return self._update(
            context, name=name, binary=binary, disabled=True,
            disabled_reason=disabled_reason,
            show_replication_data=show_replication_data)
    @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT)
    @validation.schema(schema.enable_cluster)
    def enable(self, req, body):
        """Re-enable scheduling for the named cluster."""
        # NOTE(geguileo): This method tries to be consistent with services
        # enable endpoint API.

        # Let the wsgi middleware convert NotAuthorized exceptions
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        name = body['name']
        binary = body.get('binary', constants.VOLUME_BINARY)
        show_replication_data = req.api_version_request.matches(
            mv.REPLICATION_CLUSTER)
        return self._update(
            context, name=name, binary=binary, disabled=False,
            show_replication_data=show_replication_data)
def create_resource():
    """Create the wsgi resource for the clusters controller."""
    controller = ClusterController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/consistencygroups.py 0000664 0000000 0000000 00000007557 15131732575 0025126 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The consistencygroups V3 API."""
from http import HTTPStatus
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api.contrib import consistencygroups
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import consistencygroups as schema
from cinder.api import validation
from cinder.i18n import _
from cinder.policies import groups as group_policy
LOG = logging.getLogger(__name__)
class ConsistencyGroupsController(
    consistencygroups.ConsistencyGroupsController,
):
    """The ConsistencyGroups API controller for the OpenStack API V3."""
def _check_update_parameters_v3(self, req, name, description, add_volumes,
remove_volumes):
allow_empty = req.api_version_request.matches(
mv.CG_UPDATE_BLANK_PROPERTIES, None)
if allow_empty:
if (name is None and description is None
and not add_volumes and not remove_volumes):
msg = _("Must specify one or more of the following keys to "
"update: name, description, "
"add_volumes, remove_volumes.")
raise exc.HTTPBadRequest(explanation=msg)
else:
if not (name or description or add_volumes or remove_volumes):
msg = _("Name, description, add_volumes, and remove_volumes "
"can not be all empty in the request body.")
raise exc.HTTPBadRequest(explanation=msg)
return allow_empty
    @validation.schema(schema.update)
    def update(self, req, id, body):
        """Update the consistency group.

        Expected format of the input parameter 'body':

        .. code-block:: json

            {
                "consistencygroup":
                {
                    "name": "my_cg",
                    "description": "My consistency group",
                    "add_volumes": "volume-uuid-1,volume-uuid-2,...",
                    "remove_volumes": "volume-uuid-8,volume-uuid-9,..."
                }
            }
        """
        LOG.debug('Update called for consistency group %s.', id)
        if not body:
            msg = _("Missing request body.")
            raise exc.HTTPBadRequest(explanation=msg)

        context = req.environ['cinder.context']
        group = self._get(context, id)
        context.authorize(group_policy.UPDATE_POLICY, target_obj=group)
        consistencygroup = body['consistencygroup']
        self.clean_name_and_description(consistencygroup)
        name = consistencygroup.get('name', None)
        description = consistencygroup.get('description', None)
        add_volumes = consistencygroup.get('add_volumes', None)
        remove_volumes = consistencygroup.get('remove_volumes', None)

        # Validates the request (microversion-dependent) and tells _update
        # whether blank values are allowed.
        allow_empty = self._check_update_parameters_v3(req, name,
                                                       description,
                                                       add_volumes,
                                                       remove_volumes)
        self._update(context, group, name, description, add_volumes,
                     remove_volumes, allow_empty)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
def create_resource():
    """Create the wsgi resource for the V3 consistency groups controller."""
    controller = ConsistencyGroupsController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/default_types.py 0000664 0000000 0000000 00000007777 15131732575 0024201 0 ustar 00root root 0000000 0000000 # Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The resource filters api."""
from http import HTTPStatus
from webob import exc
from cinder.api import api_utils as utils
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import default_types as schema
from cinder.api.v3.views import default_types as default_types_view
from cinder.api import validation
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import default_types as policy
class DefaultTypesController(wsgi.Controller):
    """The Default types API controller for the OpenStack API."""

    _view_builder_class = default_types_view.ViewBuilder
    @wsgi.response(HTTPStatus.OK)
    @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES)
    @validation.schema(schema.create_or_update)
    def create_update(self, req, id, body):
        """Set a default volume type for the specified project.

        :param id: the project id whose default type is being set
        """
        context = req.environ['cinder.context']
        project_id = id
        volume_type_id = body['default_type']['volume_type']

        utils.validate_project_and_authorize(context, project_id,
                                             policy.CREATE_UPDATE_POLICY)
        try:
            # The request may carry a type name or an id; resolve to an id.
            volume_type_id = objects.VolumeType.get_by_name_or_id(
                context, volume_type_id).id
        except exception.VolumeTypeNotFound as e:
            raise exc.HTTPBadRequest(explanation=e.msg)

        default_type = db.project_default_volume_type_set(
            context, volume_type_id, project_id)

        return self._view_builder.create(default_type)
    @wsgi.response(HTTPStatus.OK)
    @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES)
    def detail(self, req, id):
        """Return detail of a default type.

        :param id: the project id to look up
        :raises VolumeTypeProjectDefaultNotFound: when the project has no
            default volume type set.
        """
        context = req.environ['cinder.context']
        project_id = id

        utils.validate_project_and_authorize(context, project_id,
                                             policy.GET_POLICY)
        default_type = db.project_default_volume_type_get(context, project_id)
        if not default_type:
            raise exception.VolumeTypeProjectDefaultNotFound(
                project_id=project_id)
        return self._view_builder.detail(default_type)
    @wsgi.response(HTTPStatus.OK)
    @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES)
    def index(self, req):
        """Return a list of default types."""
        context = req.environ['cinder.context']

        try:
            context.authorize(policy.GET_ALL_POLICY)
        except exception.NotAuthorized:
            # Translate the policy failure into a user-facing 403.
            explanation = _("You are not authorized to perform this "
                            "operation.")
            raise exc.HTTPForbidden(explanation=explanation)

        # Called without a project id, this returns all projects' defaults.
        default_types = db.project_default_volume_type_get(context)
        return self._view_builder.index(default_types)
    @wsgi.response(HTTPStatus.NO_CONTENT)
    @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES)
    def delete(self, req, id):
        """Unset a default volume type for a project.

        :param id: the project id whose default type is being unset
        """
        context = req.environ['cinder.context']
        project_id = id

        utils.validate_project_and_authorize(context, project_id,
                                             policy.DELETE_POLICY)
        db.project_default_volume_type_unset(context, id)
def create_resource():
    """Create the wsgi resource for this controller."""
    controller = DefaultTypesController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/group_snapshots.py 0000664 0000000 0000000 00000022076 15131732575 0024554 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The group_snapshots API."""
from http import HTTPStatus
from oslo_log import log as logging
import webob
from webob import exc
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import group_snapshots as schema
from cinder.api.v3.views import group_snapshots as group_snapshot_views
from cinder.api import validation
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder import rpc
from cinder.volume import group_types
LOG = logging.getLogger(__name__)
class GroupSnapshotsController(wsgi.Controller):
    """The group_snapshots API controller for the OpenStack API."""

    _view_builder_class = group_snapshot_views.ViewBuilder

    def __init__(self):
        """Initialize the controller with a group API handle."""
        self.group_snapshot_api = group_api.API()
        super(GroupSnapshotsController, self).__init__()
def _check_default_cgsnapshot_type(self, group_type_id):
if group_types.is_default_cgsnapshot_type(group_type_id):
msg = (_("Group_type %(group_type)s is reserved for migrating "
"CGs to groups. Migrated group snapshots can only be "
"operated by CG snapshot APIs.")
% {'group_type': group_type_id})
raise exc.HTTPBadRequest(explanation=msg)
    @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS)
    def show(self, req, id):
        """Return data about the given group_snapshot."""
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']

        group_snapshot = self.group_snapshot_api.get_group_snapshot(
            context,
            group_snapshot_id=id)

        # Snapshots migrated from CG snapshots must not be exposed here.
        self._check_default_cgsnapshot_type(group_snapshot.group_type_id)

        return self._view_builder.detail(req, group_snapshot)
    @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS)
    def delete(self, req, id):
        """Delete a group_snapshot.

        :returns: a 202 Accepted response on success
        """
        LOG.debug('delete called for member %s', id)
        context = req.environ['cinder.context']

        LOG.info('Delete group_snapshot with id: %s', id, context=context)

        try:
            group_snapshot = self.group_snapshot_api.get_group_snapshot(
                context,
                group_snapshot_id=id)
            # Snapshots migrated from CG snapshots must not be deleted here.
            self._check_default_cgsnapshot_type(group_snapshot.group_type_id)
            self.group_snapshot_api.delete_group_snapshot(context,
                                                          group_snapshot)
        except exception.InvalidGroupSnapshot as e:
            raise exc.HTTPBadRequest(explanation=str(e))
        except (exception.GroupSnapshotNotFound,
                exception.PolicyNotAuthorized):
            # Not found exception will be handled at the wsgi level
            raise
        except Exception:
            msg = _("Error occurred when deleting group snapshot %s.") % id
            LOG.exception(msg)
            raise exc.HTTPBadRequest(explanation=msg)

        return webob.Response(status_int=HTTPStatus.ACCEPTED)
    @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS)
    def index(self, req):
        """Returns a summary list of group_snapshots."""
        return self._get_group_snapshots(req, is_detail=False)
@wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS)
def detail(self, req):
"""Returns a detailed list of group_snapshots."""
return self._get_group_snapshots(req, is_detail=True)
def _get_group_snapshots(self, req, is_detail):
"""Returns a list of group_snapshots through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
filters = marker = limit = offset = sort_keys = sort_dirs = None
if req_version.matches(mv.GROUP_SNAPSHOT_PAGINATION):
filters = req.params.copy()
marker, limit, offset = common.get_pagination_params(filters)
sort_keys, sort_dirs = common.get_sort_params(filters)
if req_version.matches(mv.RESOURCE_FILTER):
support_like = (True if req_version.matches(
mv.LIKE_FILTER) else False)
common.reject_invalid_filters(context, filters, 'group_snapshot',
support_like)
group_snapshots = self.group_snapshot_api.get_all_group_snapshots(
context, filters=filters, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
if is_detail:
group_snapshots = self._view_builder.detail_list(req,
group_snapshots)
else:
group_snapshots = self._view_builder.summary_list(req,
group_snapshots)
new_group_snapshots = []
for grp_snap in group_snapshots['group_snapshots']:
try:
# Only show group snapshots not migrated from CG snapshots
self._check_default_cgsnapshot_type(grp_snap['group_type_id'])
if not is_detail:
grp_snap.pop('group_type_id', None)
new_group_snapshots.append(grp_snap)
except exc.HTTPBadRequest:
# Skip migrated group snapshot
pass
group_snapshots['group_snapshots'] = new_group_snapshots
return group_snapshots
@wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS)
@wsgi.response(HTTPStatus.ACCEPTED)
@validation.schema(schema.create)
def create(self, req, body):
"""Create a new group_snapshot."""
LOG.debug('Creating new group_snapshot %s', body)
context = req.environ['cinder.context']
group_snapshot = body['group_snapshot']
group_id = group_snapshot['group_id']
group = self.group_snapshot_api.get(context, group_id)
self._check_default_cgsnapshot_type(group.group_type_id)
name = group_snapshot.get('name', None)
description = group_snapshot.get('description', None)
LOG.info("Creating group_snapshot %(name)s.",
{'name': name},
context=context)
try:
new_group_snapshot = self.group_snapshot_api.create_group_snapshot(
context, group, name, description)
except (exception.InvalidGroup,
exception.InvalidGroupSnapshot,
exception.InvalidVolume) as error:
raise exc.HTTPBadRequest(explanation=error.msg)
retval = self._view_builder.summary(req, new_group_snapshot)
return retval
@wsgi.Controller.api_version(mv.GROUP_SNAPSHOT_RESET_STATUS)
@wsgi.action("reset_status")
@validation.schema(schema.reset_status)
def reset_status(self, req, id, body):
return self._reset_status(req, id, body)
def _reset_status(self, req, id, body):
"""Reset status on group snapshots"""
context = req.environ['cinder.context']
status = body['reset_status']['status'].lower()
LOG.debug("Updating group '%(id)s' with "
"'%(update)s'", {'id': id,
'update': status})
try:
notifier = rpc.get_notifier('groupSnapshotStatusUpdate')
notifier.info(context, 'groupsnapshots.reset_status.start',
{'id': id,
'update': status})
gsnapshot = self.group_snapshot_api.get_group_snapshot(context, id)
self.group_snapshot_api.reset_group_snapshot_status(context,
gsnapshot,
status)
notifier.info(context, 'groupsnapshots.reset_status.end',
{'id': id,
'update': status})
except exception.GroupSnapshotNotFound as error:
# Not found exception will be handled at the wsgi level
notifier.error(context, 'groupsnapshots.reset_status',
{'error_message': error.msg,
'id': id})
raise
except exception.InvalidGroupSnapshotStatus as error:
notifier.error(context, 'groupsnapshots.reset_status',
{'error_message': error.msg,
'id': id})
raise exc.HTTPBadRequest(explanation=error.msg)
return webob.Response(status_int=HTTPStatus.ACCEPTED)
def create_resource():
    """Build the WSGI resource wrapping the group snapshots controller."""
    controller = GroupSnapshotsController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/group_specs.py 0000664 0000000 0000000 00000012107 15131732575 0023641 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The group types specs controller"""
from http import HTTPStatus
import webob
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import group_specs as schema
from cinder.api import validation
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.policies import group_types as policy
from cinder import rpc
from cinder.volume import group_types
class GroupTypeSpecsController(wsgi.Controller):
    """The group type specs API controller for the OpenStack API."""

    def _get_group_specs(self, context, group_type_id):
        """Return all specs of a group type as a ``group_specs`` mapping."""
        group_specs = db.group_type_specs_get(context, group_type_id)
        # dict() copies the mapping in one C-level step; the previous
        # item-by-item loop was equivalent but slower and noisier.
        return dict(group_specs=dict(group_specs))

    def _check_type(self, context, group_type_id):
        """Raise HTTP 404 if the group type does not exist."""
        try:
            group_types.get_group_type(context, group_type_id)
        except exception.GroupTypeNotFound as ex:
            raise webob.exc.HTTPNotFound(explanation=ex.msg)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    def index(self, req, group_type_id):
        """Returns the list of group specs for a given group type."""
        context = req.environ['cinder.context']
        context.authorize(policy.SPEC_GET_ALL_POLICY)
        self._check_type(context, group_type_id)
        return self._get_group_specs(context, group_type_id)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, group_type_id, body):
        """Create or update specs for a group type and notify listeners."""
        context = req.environ['cinder.context']
        context.authorize(policy.SPEC_CREATE_POLICY)
        self._check_type(context, group_type_id)
        specs = body['group_specs']
        db.group_type_specs_update_or_create(context,
                                             group_type_id,
                                             specs)
        notifier_info = dict(type_id=group_type_id, specs=specs)
        notifier = rpc.get_notifier('groupTypeSpecs')
        notifier.info(context, 'group_type_specs.create',
                      notifier_info)
        # Echo the request body back, matching the created specs.
        return body

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    @validation.schema(schema.update)
    def update(self, req, group_type_id, id, body):
        """Update a single group spec; the URI key must appear in the body."""
        context = req.environ['cinder.context']
        context.authorize(policy.SPEC_UPDATE_POLICY)
        self._check_type(context, group_type_id)
        if id not in body:
            expl = _('Request body and URI mismatch')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        db.group_type_specs_update_or_create(context,
                                             group_type_id,
                                             body)
        notifier_info = dict(type_id=group_type_id, id=id)
        notifier = rpc.get_notifier('groupTypeSpecs')
        notifier.info(context,
                      'group_type_specs.update',
                      notifier_info)
        return body

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    def show(self, req, group_type_id, id):
        """Return a single extra spec item."""
        context = req.environ['cinder.context']
        context.authorize(policy.SPEC_GET_POLICY)
        self._check_type(context, group_type_id)
        specs = self._get_group_specs(context, group_type_id)
        if id in specs['group_specs']:
            return {id: specs['group_specs'][id]}
        else:
            msg = _("Group Type %(type_id)s has no extra spec with key "
                    "%(id)s.") % ({'type_id': group_type_id, 'id': id})
            raise webob.exc.HTTPNotFound(explanation=msg)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    def delete(self, req, group_type_id, id):
        """Deletes an existing group spec."""
        context = req.environ['cinder.context']
        context.authorize(policy.SPEC_DELETE_POLICY)
        self._check_type(context, group_type_id)
        try:
            db.group_type_specs_delete(context, group_type_id, id)
        except exception.GroupTypeSpecsNotFound as error:
            raise webob.exc.HTTPNotFound(explanation=error.msg)
        notifier_info = dict(type_id=group_type_id, id=id)
        notifier = rpc.get_notifier('groupTypeSpecs')
        notifier.info(context,
                      'group_type_specs.delete',
                      notifier_info)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)
def create_resource():
    """Build the WSGI resource wrapping the group type specs controller."""
    controller = GroupTypeSpecsController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/group_types.py 0000664 0000000 0000000 00000022457 15131732575 0023701 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The group type & group type specs controller."""
from http import HTTPStatus
from oslo_utils import strutils
import webob
from webob import exc
from cinder.api import api_utils
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import group_types as schema
from cinder.api.v3.views import group_types as views_types
from cinder.api import validation
from cinder import exception
from cinder.i18n import _
from cinder.policies import group_types as policy
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
class GroupTypesController(wsgi.Controller):
    """The group types API controller for the OpenStack API."""

    _view_builder_class = views_types.ViewBuilder

    @utils.if_notifications_enabled
    def _notify_group_type_error(self, context, method, err,
                                 group_type=None, id=None, name=None):
        """Emit an error notification for a failed group-type operation."""
        payload = dict(
            group_types=group_type, name=name, id=id, error_message=err)
        rpc.get_notifier('groupType').error(context, method, payload)

    @utils.if_notifications_enabled
    def _notify_group_type_info(self, context, method, group_type):
        """Emit an info notification for a successful group-type operation."""
        payload = dict(group_types=group_type)
        rpc.get_notifier('groupType').info(context, method, payload)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Creates a new group type."""
        context = req.environ['cinder.context']
        context.authorize(policy.CREATE_POLICY)
        grp_type = body['group_type']
        name = grp_type['name']
        description = grp_type.get('description')
        specs = grp_type.get('group_specs', {})
        is_public = strutils.bool_from_string(grp_type.get('is_public', True),
                                              strict=True)
        try:
            group_types.create(context,
                               name,
                               specs,
                               is_public,
                               description=description)
            # Re-read the persisted type so the response reflects stored data.
            grp_type = group_types.get_group_type_by_name(context, name)
            req.cache_resource(grp_type, name='group_types')
            self._notify_group_type_info(
                context, 'group_type.create', grp_type)
        except exception.GroupTypeExists as err:
            self._notify_group_type_error(
                context, 'group_type.create', err, group_type=grp_type)
            raise webob.exc.HTTPConflict(explanation=str(err))
        except exception.GroupTypeNotFoundByName as err:
            self._notify_group_type_error(
                context, 'group_type.create', err, name=name)
            raise webob.exc.HTTPNotFound(explanation=err.msg)
        return self._view_builder.show(req, grp_type)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    @validation.schema(schema.update)
    def update(self, req, id, body):
        # Update description for a given group type.
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        grp_type = body['group_type']
        description = grp_type.get('description')
        name = grp_type.get('name')
        is_public = grp_type.get('is_public')
        if is_public is not None:
            is_public = strutils.bool_from_string(is_public, strict=True)
        # If name specified, name can not be empty.
        if name and len(name.strip()) == 0:
            msg = _("Group type name can not be empty.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # Name, description and is_public can not be None.
        # Specify one of them, or a combination thereof.
        if name is None and description is None and is_public is None:
            msg = _("Specify group type name, description or "
                    "a combination thereof.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        try:
            group_types.update(context, id, name, description,
                               is_public=is_public)
            # Get the updated
            grp_type = group_types.get_group_type(context, id)
            req.cache_resource(grp_type, name='group_types')
            self._notify_group_type_info(
                context, 'group_type.update', grp_type)
        except exception.GroupTypeNotFound as err:
            self._notify_group_type_error(
                context, 'group_type.update', err, id=id)
            raise webob.exc.HTTPNotFound(explanation=str(err))
        except exception.GroupTypeExists as err:
            self._notify_group_type_error(
                context, 'group_type.update', err, group_type=grp_type)
            raise webob.exc.HTTPConflict(explanation=str(err))
        except exception.GroupTypeUpdateFailed as err:
            self._notify_group_type_error(
                context, 'group_type.update', err, group_type=grp_type)
            raise webob.exc.HTTPInternalServerError(
                explanation=str(err))
        return self._view_builder.show(req, grp_type)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    def delete(self, req, id):
        """Deletes an existing group type."""
        context = req.environ['cinder.context']
        context.authorize(policy.DELETE_POLICY)
        try:
            grp_type = group_types.get_group_type(context, id)
            group_types.destroy(context, grp_type['id'])
            self._notify_group_type_info(
                context, 'group_type.delete', grp_type)
        except exception.GroupTypeInUse as err:
            self._notify_group_type_error(
                context, 'group_type.delete', err, group_type=grp_type)
            msg = _('Target group type is still in use.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.GroupTypeNotFound as err:
            self._notify_group_type_error(
                context, 'group_type.delete', err, id=id)
            raise webob.exc.HTTPNotFound(explanation=err.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    def index(self, req):
        """Returns the list of group types."""
        limited_types = self._get_group_types(req)
        req.cache_resource(limited_types, name='group_types')
        return self._view_builder.index(req, limited_types)

    @wsgi.Controller.api_version(mv.GROUP_TYPE)
    def show(self, req, id):
        """Return a single group type item."""
        context = req.environ['cinder.context']
        # get default group type; the previous ``id is not None and`` guard
        # was redundant since None never equals 'default'.
        if id == 'default':
            grp_type = group_types.get_default_group_type()
            if not grp_type:
                msg = _("Default group type can not be found.")
                raise exc.HTTPNotFound(explanation=msg)
            req.cache_resource(grp_type, name='group_types')
        else:
            try:
                grp_type = group_types.get_group_type(context, id)
                req.cache_resource(grp_type, name='group_types')
            except exception.GroupTypeNotFound as error:
                raise exc.HTTPNotFound(explanation=error.msg)
        return self._view_builder.show(req, grp_type)

    def _get_group_types(self, req):
        """Helper function that returns a list of type dicts."""
        params = req.params.copy()
        marker, limit, offset = common.get_pagination_params(params)
        sort_keys, sort_dirs = common.get_sort_params(params)
        filters = {}
        context = req.environ['cinder.context']
        if context.is_admin:
            # Only admin has query access to all group types
            filters['is_public'] = api_utils._parse_is_public(
                req.params.get('is_public', None))
        else:
            filters['is_public'] = True
        api_utils.remove_invalid_filter_options(
            context,
            filters,
            self._get_grp_type_filter_options())
        limited_types = group_types.get_all_group_types(context,
                                                        filters=filters,
                                                        marker=marker,
                                                        limit=limit,
                                                        sort_keys=sort_keys,
                                                        sort_dirs=sort_dirs,
                                                        offset=offset,
                                                        list_result=True)
        return limited_types

    def _get_grp_type_filter_options(self):
        """Return group type search options allowed by non-admin."""
        return ['is_public']
def create_resource():
    """Build the WSGI resource wrapping the group types controller."""
    controller = GroupTypesController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/groups.py 0000664 0000000 0000000 00000040300 15131732575 0022623 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The groups controller."""
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import uuidutils
import webob
from webob import exc
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import groups as schema
from cinder.api.v3.views import groups as views_groups
from cinder.api import validation
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder import rpc
from cinder.volume import group_types
LOG = logging.getLogger(__name__)
class GroupsController(wsgi.Controller):
    """The groups API controller for the OpenStack API."""

    _view_builder_class = views_groups.ViewBuilder

    def __init__(self):
        self.group_api = group_api.API()
        super(GroupsController, self).__init__()

    def _check_default_cgsnapshot_type(self, group_type_id):
        """Reject groups that carry the reserved CG-migration group type.

        Groups migrated from consistency groups (CGs) use the default
        cgsnapshot group type and may only be manipulated via the legacy
        CG APIs.

        :raises webob.exc.HTTPBadRequest: if ``group_type_id`` is the
            reserved default cgsnapshot type.
        """
        if group_types.is_default_cgsnapshot_type(group_type_id):
            msg = _("Group_type %(group_type)s is reserved for migrating "
                    "CGs to groups. Migrated group can only be operated by "
                    "CG APIs.") % {'group_type': group_type_id}
            raise exc.HTTPBadRequest(explanation=msg)

    @wsgi.Controller.api_version(mv.GROUP_VOLUME)
    def show(self, req, id):
        """Return data about the given group."""
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        group = self.group_api.get(
            context,
            group_id=id)
        self._check_default_cgsnapshot_type(group.group_type_id)
        return self._view_builder.detail(req, group)

    @wsgi.Controller.api_version(mv.GROUP_VOLUME_RESET_STATUS)
    @wsgi.action("reset_status")
    @validation.schema(schema.reset_status)
    def reset_status(self, req, id, body):
        return self._reset_status(req, id, body)

    def _reset_status(self, req, id, body):
        """Reset status on generic group."""
        context = req.environ['cinder.context']
        status = body['reset_status']['status'].lower()
        LOG.debug("Updating group '%(id)s' with "
                  "'%(update)s'", {'id': id,
                                   'update': status})
        try:
            # The notifier is created first inside the try block; the except
            # handlers below can only fire after it exists.
            notifier = rpc.get_notifier('groupStatusUpdate')
            notifier.info(context, 'groups.reset_status.start',
                          {'id': id,
                           'update': status})
            group = self.group_api.get(context, id)
            self.group_api.reset_status(context, group, status)
            notifier.info(context, 'groups.reset_status.end',
                          {'id': id,
                           'update': status})
        except exception.GroupNotFound as error:
            # Not found exception will be handled at the wsgi level
            notifier.error(context, 'groups.reset_status',
                           {'error_message': error.msg,
                            'id': id})
            raise
        except exception.InvalidGroupStatus as error:
            notifier.error(context, 'groups.reset_status',
                           {'error_message': error.msg,
                            'id': id})
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.GROUP_VOLUME)
    @wsgi.action("delete")
    @validation.schema(schema.delete)
    def delete_group(self, req, id, body):
        return self._delete(req, id, body)

    def _delete(self, req, id, body):
        """Delete a group."""
        LOG.debug('delete called for group %s', id)
        context = req.environ['cinder.context']
        grp_body = body['delete']
        # 'delete-volumes' controls whether member volumes are removed too.
        del_vol = strutils.bool_from_string(grp_body.get(
            'delete-volumes', False))
        LOG.info('Delete group with id: %s', id,
                 context=context)
        try:
            group = self.group_api.get(context, id)
            self._check_default_cgsnapshot_type(group.group_type_id)
            self.group_api.delete(context, group, del_vol)
        except exception.GroupNotFound:
            # Not found exception will be handled at the wsgi level
            raise
        except exception.InvalidGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.GROUP_VOLUME)
    def index(self, req):
        """Returns a summary list of groups."""
        return self._get_groups(req, is_detail=False)

    @wsgi.Controller.api_version(mv.GROUP_VOLUME)
    def detail(self, req):
        """Returns a detailed list of groups."""
        return self._get_groups(req, is_detail=True)

    def _get_groups(self, req, is_detail):
        """Returns a list of groups through view builder.

        Groups migrated from CGs (default cgsnapshot type) are excluded
        from the response.
        """
        context = req.environ['cinder.context']
        filters = req.params.copy()
        api_version = req.api_version_request
        marker, limit, offset = common.get_pagination_params(filters)
        sort_keys, sort_dirs = common.get_sort_params(filters)
        filters.pop('list_volume', None)
        if api_version.matches(mv.RESOURCE_FILTER):
            # matches() already returns a bool; no ternary needed.
            support_like = api_version.matches(mv.LIKE_FILTER)
            common.reject_invalid_filters(context, filters, 'group',
                                          support_like)
        groups = self.group_api.get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        new_groups = []
        for grp in groups:
            try:
                # Only show groups not migrated from CGs
                self._check_default_cgsnapshot_type(grp.group_type_id)
                new_groups.append(grp)
            except exc.HTTPBadRequest:
                # Skip migrated group
                pass
        if is_detail:
            groups = self._view_builder.detail_list(
                req, new_groups)
        else:
            groups = self._view_builder.summary_list(
                req, new_groups)
        return groups

    @wsgi.Controller.api_version(mv.GROUP_VOLUME)
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Create a new group."""
        LOG.debug('Creating new group %s', body)
        context = req.environ['cinder.context']
        group = body['group']
        name = group.get('name')
        description = group.get('description')
        if name:
            name = name.strip()
        if description:
            description = description.strip()
        group_type = group['group_type']
        # The group type may be given by name; resolve it to its UUID.
        if not uuidutils.is_uuid_like(group_type):
            req_group_type = group_types.get_group_type_by_name(context,
                                                                group_type)
            group_type = req_group_type['id']
        self._check_default_cgsnapshot_type(group_type)
        volume_types = group['volume_types']
        availability_zone = group.get('availability_zone')
        LOG.info("Creating group %(name)s.",
                 {'name': name},
                 context=context)
        try:
            new_group = self.group_api.create(
                context, name, description, group_type, volume_types,
                availability_zone=availability_zone)
        except (exception.Invalid, exception.ObjectActionError) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.NotFound:
            # Not found exception will be handled at the wsgi level
            raise
        retval = self._view_builder.summary(req, new_group)
        return retval

    @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS)
    @wsgi.action("create-from-src")
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create_from_source)
    def create_from_src(self, req, body):
        """Create a new group from a source.

        The source can be a group snapshot or a group. Note that
        this does not require group_type and volume_types as the
        "create" API above.
        """
        LOG.debug('Creating new group %s.', body)
        context = req.environ['cinder.context']
        group = body['create-from-src']
        name = group.get('name')
        description = group.get('description')
        if name:
            name = name.strip()
        if description:
            description = description.strip()
        group_snapshot_id = group.get('group_snapshot_id', None)
        source_group_id = group.get('source_group_id', None)
        group_type_id = None
        if group_snapshot_id:
            LOG.info("Creating group %(name)s from group_snapshot "
                     "%(snap)s.",
                     {'name': name, 'snap': group_snapshot_id},
                     context=context)
            grp_snap = self.group_api.get_group_snapshot(context,
                                                         group_snapshot_id)
            group_type_id = grp_snap.group_type_id
        elif source_group_id:
            LOG.info("Creating group %(name)s from "
                     "source group %(source_group_id)s.",
                     {'name': name, 'source_group_id': source_group_id},
                     context=context)
            source_group = self.group_api.get(context, source_group_id)
            group_type_id = source_group.group_type_id
        self._check_default_cgsnapshot_type(group_type_id)
        try:
            new_group = self.group_api.create_from_src(
                context, name, description, group_snapshot_id, source_group_id)
        except exception.InvalidGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except (exception.GroupNotFound, exception.GroupSnapshotNotFound):
            # Not found exception will be handled at the wsgi level
            raise
        except exception.CinderException as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        retval = self._view_builder.summary(req, new_group)
        return retval

    @wsgi.Controller.api_version(mv.GROUP_VOLUME)
    @validation.schema(schema.update)
    def update(self, req, id, body):
        """Update the group.

        Expected format of the input parameter 'body':

        .. code-block:: json

            {
                "group":
                {
                    "name": "my_group",
                    "description": "My group",
                    "add_volumes": "volume-uuid-1,volume-uuid-2,...",
                    "remove_volumes": "volume-uuid-8,volume-uuid-9,..."
                }
            }
        """
        LOG.debug('Update called for group %s.', id)
        context = req.environ['cinder.context']
        group = body['group']
        name = group.get('name')
        description = group.get('description')
        if name:
            name = name.strip()
        if description:
            description = description.strip()
        add_volumes = group.get('add_volumes')
        remove_volumes = group.get('remove_volumes')
        LOG.info("Updating group %(id)s with name %(name)s "
                 "description: %(description)s add_volumes: "
                 "%(add_volumes)s remove_volumes: %(remove_volumes)s.",
                 {'id': id, 'name': name,
                  'description': description,
                  'add_volumes': add_volumes,
                  'remove_volumes': remove_volumes},
                 context=context)
        try:
            group = self.group_api.get(context, id)
            self._check_default_cgsnapshot_type(group.group_type_id)
            self.group_api.update(
                context, group, name, description,
                add_volumes, remove_volumes)
        except exception.GroupNotFound:
            # Not found exception will be handled at the wsgi level
            raise
        except exception.InvalidGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.GROUP_REPLICATION)
    @wsgi.action("enable_replication")
    @validation.schema(schema.enable_replication)
    def enable_replication(self, req, id, body):
        """Enables replications for a group."""
        context = req.environ['cinder.context']
        LOG.info('Enable replication group with id: %s.', id,
                 context=context)
        try:
            group = self.group_api.get(context, id)
            self.group_api.enable_replication(context, group)
            # Not found exception will be handled at the wsgi level
        except (exception.InvalidGroup, exception.InvalidGroupType,
                exception.InvalidVolume, exception.InvalidVolumeType) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.GROUP_REPLICATION)
    @wsgi.action("disable_replication")
    @validation.schema(schema.disable_replication)
    def disable_replication(self, req, id, body):
        """Disables replications for a group."""
        context = req.environ['cinder.context']
        LOG.info('Disable replication group with id: %s.', id,
                 context=context)
        try:
            group = self.group_api.get(context, id)
            self.group_api.disable_replication(context, group)
            # Not found exception will be handled at the wsgi level
        except (exception.InvalidGroup, exception.InvalidGroupType,
                exception.InvalidVolume, exception.InvalidVolumeType) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.GROUP_REPLICATION)
    @wsgi.action("failover_replication")
    @validation.schema(schema.failover_replication)
    def failover_replication(self, req, id, body):
        """Fails over replications for a group."""
        context = req.environ['cinder.context']
        grp_body = body['failover_replication']
        allow_attached = strutils.bool_from_string(
            grp_body.get('allow_attached_volume', False))
        secondary_backend_id = grp_body.get('secondary_backend_id')
        LOG.info('Failover replication group with id: %s.', id,
                 context=context)
        try:
            group = self.group_api.get(context, id)
            self.group_api.failover_replication(context, group, allow_attached,
                                                secondary_backend_id)
            # Not found exception will be handled at the wsgi level
        except (exception.InvalidGroup, exception.InvalidGroupType,
                exception.InvalidVolume, exception.InvalidVolumeType) as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.GROUP_REPLICATION)
    @wsgi.action("list_replication_targets")
    @validation.schema(schema.list_replication)
    def list_replication_targets(self, req, id, body):
        """List replication targets for a group."""
        context = req.environ['cinder.context']
        LOG.info('List replication targets for group with id: %s.', id,
                 context=context)
        # Not found exception will be handled at the wsgi level
        group = self.group_api.get(context, id)
        replication_targets = self.group_api.list_replication_targets(
            context, group)
        return replication_targets
def create_resource():
    """Build the WSGI resource wrapping the groups controller."""
    controller = GroupsController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/limits.py 0000664 0000000 0000000 00000004063 15131732575 0022613 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The limits V3 api."""
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.views import limits as limits_views
from cinder import quota
QUOTAS = quota.QUOTAS
class LimitsController(wsgi.Controller):
    """Controller for accessing limits in the OpenStack API."""

    def index(self, req):
        """Return all global and rate limit information."""
        ctxt = req.environ['cinder.context']
        query = req.params.copy()
        version = req.api_version_request

        # TODO(wangxiyuan): Support "tenant_id" here to keep the backwards
        # compatibility. Remove it once we drop all support for "tenant".
        pre_admin_filter = version.matches(
            None, mv.get_prior_version(mv.LIMITS_ADMIN_FILTER))
        if pre_admin_filter or not ctxt.is_admin:
            # Non-admins (and older microversions) may only see their own
            # project's limits, so drop any project override from the query.
            query.pop('project_id', None)
            query.pop('tenant_id', None)

        target_project = query.get(
            'project_id', query.get('tenant_id', ctxt.project_id))
        project_quotas = QUOTAS.get_project_quotas(ctxt, target_project,
                                                   usages=False)
        absolute = {name: info['limit']
                    for name, info in project_quotas.items()}
        rates = req.environ.get("cinder.limits", [])
        return self._get_view_builder(req).build(rates, absolute)

    def _get_view_builder(self, req):
        # A fresh builder per request; ``req`` is accepted for interface
        # parity with other controllers even though it is unused here.
        return limits_views.ViewBuilder()
def create_resource():
    """Build the WSGI resource wrapping the limits controller."""
    controller = LimitsController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/messages.py 0000664 0000000 0000000 00000010365 15131732575 0023123 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The messages API."""
from http import HTTPStatus
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import messages as messages_view
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import message_field
from cinder.policies import messages as policy
class MessagesController(wsgi.Controller):
    """The User Messages API controller for the OpenStack API."""
    _view_builder_class = messages_view.ViewBuilder

    def __init__(self, ext_mgr):
        self.message_api = message_api.API()
        self.ext_mgr = ext_mgr
        super(MessagesController, self).__init__()

    def _build_user_message(self, message):
        """Populate ``message['user_message']`` with a readable string."""
        if message is None:
            return
        # NOTE(tommylikehu): if the `action_id` is empty, we use 'event_id'
        # to translate the user message.
        if message['action_id'] is None and message['event_id'] is not None:
            text = defined_messages.get_message_text(message['event_id'])
        else:
            action = message_field.translate_action(message['action_id'])
            detail = message_field.translate_detail(message['detail_id'])
            text = "%s:%s" % (action, detail)
        message['user_message'] = text

    @wsgi.Controller.api_version(mv.MESSAGES)
    def show(self, req, id):
        """Return the given message."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        message = self.message_api.get(context, id)
        # Authorize against the fetched message so ownership can be checked.
        context.authorize(policy.GET_POLICY, target_obj=message)
        self._build_user_message(message)
        return self._view_builder.detail(req, message)

    @wsgi.Controller.api_version(mv.MESSAGES)
    def delete(self, req, id):
        """Delete a message."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        message = self.message_api.get(context, id)
        context.authorize(policy.DELETE_POLICY, target_obj=message)
        self.message_api.delete(context, id)
        return webob.Response(status_int=HTTPStatus.NO_CONTENT)

    @wsgi.Controller.api_version(mv.MESSAGES)
    def index(self, req):
        """Returns a list of messages, transformed through view builder."""
        context = req.environ['cinder.context']
        api_version = req.api_version_request
        context.authorize(policy.GET_ALL_POLICY)

        # Pagination/sorting and generalized filtering were both added in
        # later microversions; default everything off for older requests.
        filters = marker = limit = offset = None
        sort_keys = sort_dirs = None
        if api_version.matches(mv.MESSAGES_PAGINATION):
            filters = req.params.copy()
            marker, limit, offset = common.get_pagination_params(filters)
            sort_keys, sort_dirs = common.get_sort_params(filters)
        if api_version.matches(mv.RESOURCE_FILTER):
            support_like = bool(api_version.matches(mv.LIKE_FILTER))
            common.reject_invalid_filters(context, filters, 'message',
                                          support_like)

        messages = self.message_api.get_all(context, filters=filters,
                                            marker=marker, limit=limit,
                                            offset=offset,
                                            sort_keys=sort_keys,
                                            sort_dirs=sort_dirs)
        for message in messages:
            self._build_user_message(message)
        return self._view_builder.index(req, messages)
def create_resource(ext_mgr):
    """Create the WSGI resource wrapping the messages controller."""
    controller = MessagesController(ext_mgr)
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/resource_common_manage.py 0000664 0000000 0000000 00000007015 15131732575 0026021 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
from cinder.api import extensions
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.i18n import _
class ManageResource(object):
    """Mixin class for v3 of ManageVolume and ManageSnapshot.

    It requires that any class inheriting from this one has `volume_api` and
    `_list_manageable_view` attributes.
    """
    VALID_SORT_KEYS = {'reference', 'size'}
    VALID_SORT_DIRS = {'asc', 'desc'}

    def _set_resource_type(self, resource):
        """Bind the authorizer and manageable-getter for *resource*."""
        self._authorizer = extensions.extension_authorizer(resource,
                                                           'list_manageable')
        self.get_manageable = getattr(self.volume_api,
                                      'get_manageable_%ss' % resource)

    def _ensure_min_version(self, req, allowed_version):
        """Reject requests below the given microversion."""
        version = req.api_version_request
        if not version.matches(allowed_version, None):
            raise exception.VersionNotFoundForAPIMethod(version=version)

    def _get_resources(self, req, is_detail):
        """Return the manageable resource list, summary or detailed."""
        self._ensure_min_version(req, mv.MANAGE_EXISTING_LIST)
        context = req.environ['cinder.context']
        self._authorizer(context)

        params = req.params.copy()
        cluster_name, host = common.get_cluster_host(
            req, params, mv.MANAGE_EXISTING_CLUSTER)
        marker, limit, offset = common.get_pagination_params(params)
        sort_keys, sort_dirs = common.get_sort_params(params,
                                                      default_key='reference')

        # These parameters are generally validated at the DB layer, but in
        # this case sorting is not done by the DB, so validate them here.
        invalid_keys = set(sort_keys) - self.VALID_SORT_KEYS
        if invalid_keys:
            msg = _("Invalid sort keys passed: %s") % ', '.join(invalid_keys)
            raise exception.InvalidParameterValue(err=msg)
        invalid_dirs = set(sort_dirs) - self.VALID_SORT_DIRS
        if invalid_dirs:
            msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs)
            raise exception.InvalidParameterValue(err=msg)

        resources = self.get_manageable(context, host, cluster_name,
                                        marker=marker, limit=limit,
                                        offset=offset, sort_keys=sort_keys,
                                        sort_dirs=sort_dirs)
        builder_name = 'detail_list' if is_detail else 'summary_list'
        view_builder = getattr(self._list_manageable_view, builder_name)
        return view_builder(req, resources, len(resources))

    @wsgi.extends
    def index(self, req):
        """Returns a summary list of volumes available to manage."""
        return self._get_resources(req, False)

    @wsgi.extends
    def detail(self, req):
        """Returns a detailed list of volumes available to manage."""
        return self._get_resources(req, True)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/resource_filters.py 0000664 0000000 0000000 00000003065 15131732575 0024672 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The resource filters api."""
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import resource_filters as filter_views
class ResourceFiltersController(wsgi.Controller):
    """The resource filter API controller for the OpenStack API."""
    _view_builder_class = filter_views.ViewBuilder

    def __init__(self, ext_mgr=None):
        """Initialize controller class."""
        self.ext_mgr = ext_mgr
        super(ResourceFiltersController, self).__init__()

    @wsgi.Controller.api_version(mv.RESOURCE_FILTER_CONFIG)
    def index(self, req):
        """Return a list of resource filters."""
        # An optional 'resource' query parameter narrows the listing to a
        # single resource type; None means all configured filters.
        resource = req.params.get('resource', None)
        enabled_filters = common.get_enabled_resource_filters(
            resource=resource)
        return filter_views.ViewBuilder.list(enabled_filters)
def create_resource(ext_mgr):
    """Create the wsgi resource for this controller."""
    controller = ResourceFiltersController(ext_mgr)
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/router.py 0000664 0000000 0000000 00000037770 15131732575 0022645 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack Volume API.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi as base_wsgi
import routes
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.v3 import attachments
from cinder.api.v3 import backups
from cinder.api.v3 import clusters
from cinder.api.v3 import consistencygroups
from cinder.api.v3 import default_types
from cinder.api.v3 import group_snapshots
from cinder.api.v3 import group_specs
from cinder.api.v3 import group_types
from cinder.api.v3 import groups
from cinder.api.v3 import limits
from cinder.api.v3 import messages
from cinder.api.v3 import resource_filters
from cinder.api.v3 import services
from cinder.api.v3 import snapshot_manage
from cinder.api.v3 import snapshot_metadata
from cinder.api.v3 import snapshots
from cinder.api.v3 import types
from cinder.api.v3 import volume_manage
from cinder.api.v3 import volume_metadata
from cinder.api.v3 import volume_transfer
from cinder.api.v3 import volumes
from cinder.api.v3 import workers
from cinder.api import versions
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ProjectMapper(wsgi.APIMapper):
    def resource(self, member_name, collection_name, **kwargs):
        """Base resource path handler.

        This method is compatible with resource paths that include a
        project_id and those that don't. Including project_id in the URLs
        was a legacy API requirement; and making API requests against
        such endpoints won't work for users that don't belong to a
        particular project.
        """
        # NOTE: project_id parameter is only valid if its hex or hex + dashes
        # (note, integers are a subset of this). This is required to handle
        # our overlapping routes issues.
        project_id_token = '{project_id:%s}' % CONF.project_id_regex

        has_parent = 'parent_resource' in kwargs
        # First pass: register the legacy routes that carry a project_id
        # path segment.
        if not has_parent:
            kwargs['path_prefix'] = '%s/' % project_id_token
        else:
            parent = kwargs['parent_resource']
            kwargs['path_prefix'] = '%s/%s/:%s_id' % (
                project_id_token,
                parent['collection_name'],
                parent['member_name'],
            )
        routes.Mapper.resource(
            self, member_name, collection_name, **kwargs
        )

        # Second pass: register the same routes without a project_id.
        if not has_parent:
            del kwargs['path_prefix']
        else:
            parent = kwargs['parent_resource']
            kwargs['path_prefix'] = '%s/:%s_id' % (parent['collection_name'],
                                                   parent['member_name'])
        routes.Mapper.resource(
            self, member_name, collection_name, **kwargs
        )
class APIRouter(base_wsgi.Router):
    """Routes requests on the API to the appropriate controller and method."""
    def __init__(self, ext_mgr=None):
        # Core routes are registered first, then extension resources, then
        # controller extensions that augment already-registered resources.
        ext_mgr = ext_mgr or extensions.ExtensionManager()
        mapper = ProjectMapper()
        self.resources = {}
        self._setup_routes(mapper, ext_mgr)
        self._setup_ext_routes(mapper, ext_mgr)
        self._setup_extensions(ext_mgr)
        super().__init__(mapper)
    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory.
        :class:`oslo_service.wsgi.Router` doesn't have this.
        """
        return cls()
    def _setup_ext_routes(self, mapper, ext_mgr):
        """Register routes for resources contributed by API extensions."""
        for resource in ext_mgr.get_resources():
            LOG.debug('Extended resource: %s', resource.collection)
            wsgi_resource = wsgi.Resource(resource.controller)
            self.resources[resource.collection] = wsgi_resource
            kargs = dict(
                controller=wsgi_resource,
                collection=resource.collection_actions,
                member=resource.member_actions)
            if resource.parent:
                kargs['parent_resource'] = resource.parent
            mapper.resource(resource.collection, resource.collection, **kargs)
            # Extensions may add non-RESTful routes of their own.
            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)
    def _setup_extensions(self, ext_mgr):
        """Attach controller extensions to the resources they extend."""
        for extension in ext_mgr.get_controller_extensions():
            collection = extension.collection
            controller = extension.controller
            if collection not in self.resources:
                LOG.warning(
                    'Extension %(ext_name)s: Cannot extend '
                    'resource %(collection)s: No such resource',
                    {
                        'ext_name': extension.extension.name,
                        'collection': collection,
                    },
                )
                continue
            LOG.debug(
                'Extension %(ext_name)s extending resource: %(collection)s',
                {
                    'ext_name': extension.extension.name,
                    'collection': collection,
                },
            )
            resource = self.resources[collection]
            resource.register_extensions(controller)
    def _setup_routes(self, mapper, ext_mgr):
        """Register the core (non-extension) v3 API routes.

        Because the mapper is a ProjectMapper, each ``mapper.resource()``
        call registers the routes both with and without a leading
        project_id path segment.
        """
        self.resources['versions'] = versions.create_resource()
        mapper.connect("versions", "/",
                       controller=self.resources['versions'],
                       action='index')
        mapper.redirect("", "/")
        self.resources['volumes'] = volumes.create_resource(ext_mgr)
        mapper.resource("volume", "volumes",
                        controller=self.resources['volumes'],
                        collection={'detail': 'GET', 'summary': 'GET'},
                        member={'action': 'POST'})
        self.resources['messages'] = messages.create_resource(ext_mgr)
        mapper.resource("message", "messages",
                        controller=self.resources['messages'])
        # Clusters use explicit routes rather than mapper.resource() because
        # the API exposes enable/disable as PUTs on the collection.
        self.resources['clusters'] = clusters.create_resource()
        mapper.create_route(
            '/clusters', 'GET', self.resources['clusters'], 'index')
        mapper.create_route(
            '/clusters/detail', 'GET', self.resources['clusters'], 'detail')
        mapper.create_route(
            '/clusters/{id}', 'GET', self.resources['clusters'], 'show')
        mapper.create_route(
            '/clusters/enable', 'PUT', self.resources['clusters'], 'enable')
        mapper.create_route(
            '/clusters/disable', 'PUT', self.resources['clusters'], 'disable')
        self.resources['types'] = types.create_resource()
        mapper.resource("type", "types",
                        controller=self.resources['types'],
                        member={'action': 'POST'})
        self.resources['group_types'] = group_types.create_resource()
        mapper.resource("group_type", "group_types",
                        controller=self.resources['group_types'])
        self.resources['group_specs'] = group_specs.create_resource()
        mapper.resource("group_spec", "group_specs",
                        controller=self.resources['group_specs'],
                        parent_resource=dict(member_name='group_type',
                                             collection_name='group_types'))
        self.resources['groups'] = groups.create_resource()
        mapper.resource("group", "groups",
                        controller=self.resources['groups'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("groups",
                           "%s/groups/{id}/action" % path_prefix,
                           controller=self.resources["groups"],
                           action="action",
                           conditions={"method": ["POST"]})
            mapper.connect("groups/action",
                           "%s/groups/action" % path_prefix,
                           controller=self.resources["groups"],
                           action="action",
                           conditions={"method": ["POST"]})
        self.resources['group_snapshots'] = group_snapshots.create_resource()
        mapper.resource("group_snapshot", "group_snapshots",
                        controller=self.resources['group_snapshots'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("group_snapshots",
                           "%s/group_snapshots/{id}/action" % path_prefix,
                           controller=self.resources["group_snapshots"],
                           action="action",
                           conditions={"method": ["POST"]})
        self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
        mapper.resource("snapshot", "snapshots",
                        controller=self.resources['snapshots'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})
        self.resources['limits'] = limits.create_resource()
        mapper.resource("limit", "limits",
                        controller=self.resources['limits'])
        self.resources['snapshot_metadata'] = \
            snapshot_metadata.create_resource()
        snapshot_metadata_controller = self.resources['snapshot_metadata']
        mapper.resource("snapshot_metadata", "metadata",
                        controller=snapshot_metadata_controller,
                        parent_resource=dict(member_name='snapshot',
                                             collection_name='snapshots'))
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("metadata",
                           "%s/snapshots/{snapshot_id}/metadata" % path_prefix,
                           controller=snapshot_metadata_controller,
                           action='update_all',
                           conditions={"method": ['PUT']})
        self.resources['volume_metadata'] = volume_metadata.create_resource()
        volume_metadata_controller = self.resources['volume_metadata']
        mapper.resource("volume_metadata", "metadata",
                        controller=volume_metadata_controller,
                        parent_resource=dict(member_name='volume',
                                             collection_name='volumes'))
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("metadata",
                           "%s/volumes/{volume_id}/metadata" % path_prefix,
                           controller=volume_metadata_controller,
                           action='update_all',
                           conditions={"method": ['PUT']})
        self.resources['consistencygroups'] = (
            consistencygroups.create_resource())
        mapper.resource("consistencygroup", "consistencygroups",
                        controller=self.resources['consistencygroups'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})
        self.resources['manageable_volumes'] = volume_manage.create_resource()
        mapper.resource("manageable_volume", "manageable_volumes",
                        controller=self.resources['manageable_volumes'],
                        collection={'detail': 'GET'})
        self.resources['manageable_snapshots'] = \
            snapshot_manage.create_resource()
        mapper.resource("manageable_snapshot", "manageable_snapshots",
                        controller=self.resources['manageable_snapshots'],
                        collection={'detail': 'GET'})
        self.resources['backups'] = (
            backups.create_resource())
        mapper.resource("backup", "backups",
                        controller=self.resources['backups'],
                        collection={'detail': 'GET'})
        self.resources['attachments'] = attachments.create_resource(ext_mgr)
        mapper.resource("attachment", "attachments",
                        controller=self.resources['attachments'],
                        collection={'detail': 'GET', 'summary': 'GET'},
                        member={'action': 'POST'})
        self.resources['workers'] = workers.create_resource()
        mapper.resource('worker', 'workers',
                        controller=self.resources['workers'],
                        collection={'cleanup': 'POST'})
        self.resources['resource_filters'] = resource_filters.create_resource(
            ext_mgr)
        mapper.resource('resource_filter', 'resource_filters',
                        controller=self.resources['resource_filters'])
        self.resources['volume_transfers'] = (
            volume_transfer.create_resource())
        mapper.resource("volume-transfer", "volume-transfers",
                        controller=self.resources['volume_transfers'],
                        collection={'detail': 'GET'},
                        member={'accept': 'POST'})
        self.resources['default_types'] = default_types.create_resource()
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect(
                "default-types", "%s/default-types/{id}" % path_prefix,
                controller=self.resources['default_types'],
                action='create_update',
                conditions={"method": ['PUT']})
            mapper.connect(
                "default-types", "%s/default-types" % path_prefix,
                controller=self.resources['default_types'],
                action='index',
                conditions={"method": ['GET']})
            mapper.connect(
                "default-types", "%s/default-types/{id}" % path_prefix,
                controller=self.resources['default_types'],
                action='detail',
                conditions={"method": ['GET']})
            mapper.connect(
                "default-types", "%s/default-types/{id}" % path_prefix,
                controller=self.resources['default_types'],
                action='delete',
                conditions={"method": ['DELETE']})
        # TODO: These really shouldn't be configurable nowadays, but removing
        # this is a separate effort.
        if ext_mgr.is_loaded('os-services'):
            resource = services.create_resource(ext_mgr)
            mapper.create_route(
                '/os-services', 'GET', resource, 'index')
            mapper.create_route(
                '/os-services/enable', 'PUT', resource, 'enable')
            mapper.create_route(
                '/os-services/disable', 'PUT', resource, 'disable')
            mapper.create_route(
                '/os-services/disable-log-reason', 'PUT', resource,
                'disable_log_reason')
            mapper.create_route(
                '/os-services/get-log', 'PUT', resource, 'get_log')
            mapper.create_route(
                '/os-services/set-log', 'PUT', resource, 'set_log')
            mapper.create_route(
                '/os-services/freeze', 'PUT', resource, 'freeze')
            mapper.create_route(
                '/os-services/thaw', 'PUT', resource, 'thaw')
            mapper.create_route(
                '/os-services/failover_host', 'PUT', resource, 'failover_host')
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/services.py 0000664 0000000 0000000 00000030715 15131732575 0023140 0 ustar 00root root 0000000 0000000 # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import webob.exc
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import services as schema
from cinder.api import validation
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.policies import services as policy
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder import volume
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ServiceController(wsgi.Controller):
    """The os-services API controller: list and administer cinder services."""

    def __init__(self, ext_mgr=None):
        self.ext_mgr = ext_mgr
        super().__init__()
        self.volume_api = volume.API()
        # RPC clients used to push log-level operations to running services.
        self.rpc_apis = {
            constants.SCHEDULER_BINARY: scheduler_rpcapi.SchedulerAPI(),
            constants.VOLUME_BINARY: volume_rpcapi.VolumeAPI(),
            constants.BACKUP_BINARY: backup_rpcapi.BackupAPI(),
        }

    def index(self, req):
        """Return a list of all running services.

        Filter by host & service name.
        """
        context = req.environ['cinder.context']
        context.authorize(policy.GET_ALL_POLICY)
        detailed = self.ext_mgr.is_loaded('os-extended-services')
        now = timeutils.utcnow(with_timezone=True)

        filters = {}
        if 'host' in req.GET:
            filters['host'] = req.GET['host']
        if 'binary' in req.GET:
            filters['binary'] = req.GET['binary']

        services = objects.ServiceList.get_all(context, filters)

        # Get backend state from scheduler
        if req.api_version_request.matches(mv.BACKEND_STATE_REPORT):
            backend_state_map = {}
            scheduler_api = self.rpc_apis[constants.SCHEDULER_BINARY]
            pools = scheduler_api.get_pools(context)
            for pool in pools:
                backend_name = volume_utils.extract_host(pool.get("name"))
                back_state = pool.get('capabilities', {}).get('backend_state',
                                                              'up')
                backend_state_map[backend_name] = back_state

        svcs = []
        for svc in services:
            updated_at = svc.updated_at
            delta = now - (svc.updated_at or svc.created_at)
            delta_sec = delta.total_seconds()
            # Prefer modified_at when it is more recent than updated_at.
            if svc.modified_at:
                delta_mod = now - svc.modified_at
                if abs(delta_sec) >= abs(delta_mod.total_seconds()):
                    updated_at = svc.modified_at
            alive = abs(delta_sec) <= CONF.service_down_time
            art = "up" if alive else "down"
            active = 'enabled'
            if svc.disabled:
                active = 'disabled'
            if updated_at:
                updated_at = timeutils.normalize_time(updated_at)
            ret_fields = {'binary': svc.binary, 'host': svc.host,
                          'zone': svc.availability_zone,
                          'status': active, 'state': art,
                          'updated_at': updated_at}
            if (req.api_version_request.matches(mv.BACKEND_STATE_REPORT) and
                    svc.binary == constants.VOLUME_BINARY):
                ret_fields['backend_state'] = backend_state_map.get(svc.host)
            # On CLUSTER_SUPPORT we added cluster support
            if req.api_version_request.matches(mv.CLUSTER_SUPPORT):
                ret_fields['cluster'] = svc.cluster_name
            if detailed:
                ret_fields['disabled_reason'] = svc.disabled_reason
                if svc.binary == constants.VOLUME_BINARY:
                    ret_fields['replication_status'] = svc.replication_status
                    ret_fields['active_backend_id'] = svc.active_backend_id
                    ret_fields['frozen'] = svc.frozen
            svcs.append(ret_fields)
        return {'services': svcs}

    def _volume_api_proxy(self, fun, *args):
        """Call *fun*, converting ServiceNotFound into InvalidInput."""
        try:
            return fun(*args)
        except exception.ServiceNotFound as ex:
            raise exception.InvalidInput(ex.msg)

    # TODO: This currently returns HTTP 200 but it should return HTTP 204 since
    # there's no content
    @validation.schema(schema.freeze_and_thaw)
    def freeze(self, req, body):
        """Freeze a backend host or cluster for replication."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        cluster_name, host = common.get_cluster_host(
            req, body, mv.REPLICATION_CLUSTER)
        return self._volume_api_proxy(self.volume_api.freeze_host, context,
                                      host, cluster_name)

    # TODO: This currently returns HTTP 200 but it should return HTTP 204 since
    # there's no content
    @validation.schema(schema.freeze_and_thaw)
    def thaw(self, req, body):
        """Thaw a previously frozen backend host or cluster."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        cluster_name, host = common.get_cluster_host(
            req, body, mv.REPLICATION_CLUSTER)
        return self._volume_api_proxy(self.volume_api.thaw_host, context,
                                      host, cluster_name)

    @validation.schema(schema.failover_host)
    def failover_host(self, req, body):
        """Fail over a replicated backend to its secondary."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        clustered = req.api_version_request.matches(mv.REPLICATION_CLUSTER)
        # We set version to None to always get the cluster name from the body,
        # to False when we don't want to get it, and REPLICATION_CLUSTER when
        # we only want it if the requested version is REPLICATION_CLUSTER or
        # higher.
        version = mv.REPLICATION_CLUSTER if clustered else False
        cluster_name, host = common.get_cluster_host(req, body, version)
        self._volume_api_proxy(self.volume_api.failover, context, host,
                               cluster_name, body.get('backend_id'))
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    def _log_params_binaries_services(self, context, body):
        """Get binaries and services referred by given log set/get request."""
        query_filters = {'is_up': True}
        binary = body.get('binary')
        binaries = []
        if binary in ('*', None, ''):
            binaries = constants.LOG_BINARIES
        elif binary == constants.API_BINARY:
            # cinder-api has no Service DB record; handle it locally only.
            return [binary], []
        elif binary in constants.LOG_BINARIES:
            binaries = [binary]
            query_filters['binary'] = binary
        server = body.get('server')
        if server:
            query_filters['host_or_cluster'] = server
        services = objects.ServiceList.get_all(context, filters=query_filters)
        return binaries, services

    @wsgi.Controller.api_version(mv.LOG_LEVEL)
    @validation.schema(schema.set_log)
    def set_log(self, req, body):
        """Set log levels of services dynamically."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        prefix = body.get('prefix')
        level = body.get('level')
        binaries, services = self._log_params_binaries_services(context, body)
        log_req = objects.LogLevel(context, prefix=prefix, level=level)
        if constants.API_BINARY in binaries:
            utils.set_log_levels(prefix, level)
        for service in services:
            self.rpc_apis[service.binary].set_log_levels(context,
                                                         service, log_req)
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    @wsgi.Controller.api_version(mv.LOG_LEVEL)
    @validation.schema(schema.get_log)
    def get_log(self, req, body):
        """Get current log levels for services."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        prefix = body.get('prefix')
        binaries, services = self._log_params_binaries_services(context, body)
        result = []
        log_req = objects.LogLevel(context, prefix=prefix)
        # Avoid showing constants if 'server' is set.
        server_filter = body.get('server')
        if not server_filter or server_filter == CONF.host:
            if constants.API_BINARY in binaries:
                levels = utils.get_log_levels(prefix)
                result.append({'host': CONF.host,
                               'binary': constants.API_BINARY,
                               'levels': levels})
        for service in services:
            levels = self.rpc_apis[service.binary].get_log_levels(context,
                                                                  service,
                                                                  log_req)
            result.append({'host': service.host,
                           'binary': service.binary,
                           'levels': {le.prefix: le.level for le in levels}})
        return {'log_levels': result}

    @validation.schema(schema.disable_log_reason)
    def disable_log_reason(self, req, body):
        """Disable a service and record the reason."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        if not self.ext_mgr.is_loaded('os-extended-services'):
            return exception.InvalidInput(reason=_('Unknown action'))
        host = common.get_cluster_host(req, body, False)[1]
        disabled_reason = body.get('disabled_reason')
        # NOTE(uni): deprecating service request key, binary takes precedence
        # Still keeping service key here for API compatibility sake.
        service = body.get('service', '')
        binary = body.get('binary', '')
        binary_key = binary or service
        # Not found exception will be handled at the wsgi level
        svc = objects.Service.get_by_args(context, host, binary_key)
        svc.disabled = True
        if disabled_reason:
            svc.disabled_reason = disabled_reason
        svc.save()
        return {
            'host': host,
            'service': service,
            'binary': binary,
            'status': 'disabled',
            'disabled': True,
            'disabled_reason': disabled_reason,
        }

    @validation.schema(schema.enable_and_disable)
    def enable(self, req, body):
        """Enable a service so the scheduler may use it again."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        host = common.get_cluster_host(req, body, False)[1]
        # NOTE(uni): deprecating service request key, binary takes precedence
        # Still keeping service key here for API compatibility sake.
        service = body.get('service', '')
        binary = body.get('binary', '')
        binary_key = binary or service
        # Not found exception will be handled at the wsgi level
        svc = objects.Service.get_by_args(context, host, binary_key)
        # BUGFIX: actually flip the flag; previously the service was saved
        # unchanged while the response claimed it had been enabled.
        svc.disabled = False
        svc.disabled_reason = None
        svc.save()
        return {
            'host': host,
            'service': service,
            'binary': binary,
            'status': 'enabled',
            'disabled': False,
        }

    @validation.schema(schema.enable_and_disable)
    def disable(self, req, body):
        """Disable a service so the scheduler stops using it."""
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)
        host = common.get_cluster_host(req, body, False)[1]
        # NOTE(uni): deprecating service request key, binary takes precedence
        # Still keeping service key here for API compatibility sake.
        service = body.get('service', '')
        binary = body.get('binary', '')
        binary_key = binary or service
        # Not found exception will be handled at the wsgi level
        svc = objects.Service.get_by_args(context, host, binary_key)
        # BUGFIX: actually flip the flag; previously the service was saved
        # unchanged while the response claimed it had been disabled.
        svc.disabled = True
        svc.save()
        return {
            'host': host,
            'service': service,
            'binary': binary,
            'status': 'disabled',
            'disabled': True,
        }
def create_resource(ext_mgr):
    """Create the WSGI resource wrapping the services controller."""
    controller = ServiceController(ext_mgr)
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/snapshot_manage.py 0000664 0000000 0000000 00000002740 15131732575 0024461 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from cinder.api.contrib import snapshot_manage
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_manage as schema
from cinder.api.v3 import resource_common_manage as common
from cinder.api import validation
class SnapshotManageController(
    common.ManageResource, snapshot_manage.SnapshotManageController
):
    """v3 snapshot-manage controller: adds listing of manageable snapshots."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Wire the ManageResource mixin to operate on snapshots.
        self._set_resource_type('snapshot')

    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Manage an existing backend snapshot as a cinder snapshot."""
        self._ensure_min_version(req, mv.MANAGE_EXISTING_LIST)
        return super().create(req, body=body)
def create_resource():
    """Build the WSGI resource for snapshot manage requests."""
    controller = SnapshotManageController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/snapshot_metadata.py 0000664 0000000 0000000 00000012601 15131732575 0025006 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
import webob
from webob import exc
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_metadata as schema
from cinder.api import validation
from cinder import exception
from cinder.i18n import _
from cinder import volume
class SnapshotMetadataController(wsgi.Controller):
    """The snapshot metadata API controller for the OpenStack API."""

    def __init__(self):
        super().__init__()
        self.volume_api = volume.API()

    def _get_metadata(self, context, snapshot_id):
        """Return only the metadata dict for the given snapshot."""
        _snapshot, metadata = self._get_snapshot_and_metadata(context,
                                                              snapshot_id)
        return metadata

    def _get_snapshot_and_metadata(self, context, snapshot_id):
        """Look up a snapshot together with its metadata.

        Not found exception will be handled at the wsgi level.
        """
        snapshot = self.volume_api.get_snapshot(context, snapshot_id)
        metadata = self.volume_api.get_snapshot_metadata(context, snapshot)
        return snapshot, metadata

    def index(self, req, snapshot_id):
        """Returns the list of metadata for a given snapshot."""
        context = req.environ['cinder.context']
        metadata = self._get_metadata(context, snapshot_id)
        return {'metadata': metadata}

    @validation.schema(schema.create)
    def create(self, req, snapshot_id, body):
        """Create metadata items for a snapshot (merge semantics)."""
        context = req.environ['cinder.context']
        new_metadata = self._update_snapshot_metadata(
            context, snapshot_id, body['metadata'], delete=False)
        return {'metadata': new_metadata}

    @validation.schema(schema.update)
    def update(self, req, snapshot_id, id, body):
        """Update the single metadata item named by the URI key."""
        meta_item = body['meta']
        # The key in the body must match the key in the URI, and there
        # must be exactly one item.
        if id not in meta_item:
            raise exc.HTTPBadRequest(
                explanation=_('Request body and URI mismatch'))
        if len(meta_item) > 1:
            raise exc.HTTPBadRequest(
                explanation=_('Request body contains too many items'))
        context = req.environ['cinder.context']
        self._update_snapshot_metadata(
            context, snapshot_id, meta_item, delete=False)
        return {'meta': meta_item}

    @validation.schema(schema.update_all)
    def update_all(self, req, snapshot_id, body):
        """Replace the full metadata dict of a snapshot."""
        context = req.environ['cinder.context']
        new_metadata = self._update_snapshot_metadata(
            context, snapshot_id, body['metadata'], delete=True)
        return {'metadata': new_metadata}

    def _update_snapshot_metadata(self, context,
                                  snapshot_id, metadata,
                                  delete=False):
        """Apply a metadata change, mapping domain errors to HTTP errors."""
        try:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            return self.volume_api.update_snapshot_metadata(
                context, snapshot, metadata, delete)
        # Not found exception will be handled at the wsgi level
        except (ValueError, AttributeError):
            msg = _("Malformed request body")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.InvalidVolumeMetadata as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidVolumeMetadataSize as error:
            raise exc.HTTPRequestEntityTooLarge(explanation=error.msg)

    def show(self, req, snapshot_id, id):
        """Return a single metadata item."""
        context = req.environ['cinder.context']
        data = self._get_metadata(context, snapshot_id)
        if id not in data:
            raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id,
                                                     metadata_key=id)
        return {'meta': {id: data[id]}}

    def delete(self, req, snapshot_id, id):
        """Deletes an existing metadata."""
        context = req.environ['cinder.context']
        snapshot, snap_meta = self._get_snapshot_and_metadata(context,
                                                              snapshot_id)
        if id not in snap_meta:
            raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id,
                                                     metadata_key=id)
        self.volume_api.delete_snapshot_metadata(context, snapshot, id)
        return webob.Response(status_int=HTTPStatus.OK)
def create_resource():
    """Build the WSGI resource for snapshot metadata requests."""
    controller = SnapshotMetadataController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/snapshots.py 0000664 0000000 0000000 00000023555 15131732575 0023343 0 ustar 00root root 0000000 0000000 # Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes snapshots V3 API."""
import ast
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import strutils
import webob
from webob import exc
from cinder.api import api_utils
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshots as schema
from cinder.api.v3.views import snapshots as snapshot_views
from cinder.api import validation
from cinder import utils
from cinder import volume
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
# User-facing error returned when a snapshot-create request at or above
# mv.SNAPSHOT_IN_USE passes 'force' with a false value (see
# SnapshotsController.create). Fixes the "compatability" typo and drops
# the f-prefix from segments that contain no interpolation.
SNAPSHOT_IN_USE_FLAG_MSG = (
    f"Since microversion {mv.SNAPSHOT_IN_USE} the 'force' flag is "
    "invalid for this request. For backward compatibility, however, when "
    "the 'force' flag is passed with a value evaluating to True, it is "
    "silently ignored."
)
class SnapshotsController(wsgi.Controller):
    """The Snapshots API controller for the OpenStack API."""

    _view_builder_class = snapshot_views.ViewBuilder

    def __init__(self, ext_mgr=None):
        # ext_mgr is only stored here; this class never calls it directly.
        self.volume_api = volume.API()
        self.ext_mgr = ext_mgr
        super(SnapshotsController, self).__init__()

    def show(self, req, id):
        """Return data about the given snapshot."""
        context = req.environ['cinder.context']
        # Not found exception will be handled at the wsgi level
        snapshot = self.volume_api.get_snapshot(context, id)
        req.cache_db_snapshot(snapshot)
        return self._view_builder.detail(req, snapshot)

    def delete(self, req, id):
        """Delete a snapshot."""
        context = req.environ['cinder.context']
        LOG.info("Delete snapshot with id: %s", id)
        # Not found exception will be handled at the wsgi level
        snapshot = self.volume_api.get_snapshot(context, id)
        self.volume_api.delete_snapshot(context, snapshot)
        # Deletion is asynchronous, hence 202 Accepted.
        return webob.Response(status_int=HTTPStatus.ACCEPTED)

    def _get_snapshot_filter_options(self):
        """returns tuple of valid filter options"""
        return ('status', 'volume_id', 'name', 'metadata')

    def _format_snapshot_filter_options(self, search_opts):
        """Convert valid filter options to correct expected format"""
        # Get the dict object out of queried metadata
        # convert metadata query value from string to dict
        if 'metadata' in search_opts.keys():
            try:
                search_opts['metadata'] = ast.literal_eval(
                    search_opts['metadata'])
            except (ValueError, SyntaxError):
                # Keep the raw string filter when it is not a dict literal.
                LOG.debug('Could not evaluate value %s, assuming string',
                          search_opts['metadata'])
        # use_quota arrives as a query string; coerce it to a real bool.
        if 'use_quota' in search_opts:
            search_opts['use_quota'] = utils.get_bool_param('use_quota',
                                                            search_opts)

    # (prior-microversion, filter-field) pairs: each filter is stripped
    # from the request when the request microversion is at or below the
    # listed prior version, i.e. before the filter was introduced.
    MV_ADDED_FILTERS = (
        (mv.get_prior_version(mv.SNAPSHOT_LIST_METADATA_FILTER), 'metadata'),
        # REST API receives consumes_quota, but process_general_filtering
        # transforms it into use_quota
        (mv.get_prior_version(mv.USE_QUOTA), 'use_quota'),
    )

    @common.process_general_filtering('snapshot')
    def _process_snapshot_filtering(self, context=None, filters=None,
                                    req_version=None):
        """Formats allowed filters"""
        for version, field in self.MV_ADDED_FILTERS:
            if req_version.matches(None, version):
                filters.pop(field, None)
        # Filter out invalid options
        allowed_search_options = self._get_snapshot_filter_options()
        api_utils.remove_invalid_filter_options(context, filters,
                                                allowed_search_options)

    def _items(self, req, is_detail=True):
        """Returns a list of snapshots, transformed through view builder."""
        context = req.environ['cinder.context']
        # Pop out non search_opts and create local variables
        search_opts = req.GET.copy()
        sort_keys, sort_dirs = common.get_sort_params(search_opts)
        marker, limit, offset = common.get_pagination_params(search_opts)
        req_version = req.api_version_request
        show_count = False
        # 'with_count' is only honored from SUPPORT_COUNT_INFO onwards.
        if req_version.matches(
                mv.SUPPORT_COUNT_INFO) and 'with_count' in search_opts:
            show_count = utils.get_bool_param('with_count', search_opts)
            search_opts.pop('with_count')
        # process filters
        self._process_snapshot_filtering(context=context,
                                         filters=search_opts,
                                         req_version=req_version)
        # process snapshot filters to appropriate formats if required
        self._format_snapshot_filter_options(search_opts)
        # NOTE(review): req_version was already read above; this second
        # assignment is redundant but harmless.
        req_version = req.api_version_request
        if req_version.matches(mv.SNAPSHOT_SORT, None) and 'name' in sort_keys:
            sort_keys[sort_keys.index('name')] = 'display_name'
        # NOTE(thingee): v3 API allows name instead of display_name
        if 'name' in search_opts:
            search_opts['display_name'] = search_opts.pop('name')
        # search_opts is copied because the count query below reuses it.
        snapshots = self.volume_api.get_all_snapshots(
            context,
            search_opts=search_opts.copy(),
            marker=marker,
            limit=limit,
            sort_keys=sort_keys,
            sort_dirs=sort_dirs,
            offset=offset)
        total_count = None
        if show_count:
            total_count = self.volume_api.calculate_resource_count(
                context, 'snapshot', search_opts)
        req.cache_db_snapshots(snapshots.objects)
        if is_detail:
            snapshots = self._view_builder.detail_list(req, snapshots.objects,
                                                       total_count)
        else:
            snapshots = self._view_builder.summary_list(req, snapshots.objects,
                                                        total_count)
        return snapshots

    def index(self, req):
        """Returns a summary list of snapshots."""
        return self._items(req, is_detail=False)

    def detail(self, req):
        """Returns a detailed list of snapshots."""
        return self._items(req, is_detail=True)

    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create)
    def create(self, req, body):
        """Creates a new snapshot."""
        kwargs = {}
        context = req.environ['cinder.context']
        snapshot = body['snapshot']
        kwargs['metadata'] = snapshot.get('metadata', None)
        volume_id = snapshot['volume_id']
        volume = self.volume_api.get(context, volume_id)
        req_version = req.api_version_request
        force_flag = snapshot.get('force')
        force = False
        if force_flag is not None:
            # note: this won't raise because it passed schema validation
            force = strutils.bool_from_string(force_flag, strict=True)
            if req_version.matches(mv.SNAPSHOT_IN_USE):
                # strictly speaking, the 'force' flag is invalid for
                # mv.SNAPSHOT_IN_USE, but we silently ignore a True
                # value for backward compatibility
                if force is False:
                    raise exc.HTTPBadRequest(
                        explanation=SNAPSHOT_IN_USE_FLAG_MSG)
        LOG.info("Create snapshot from volume %s", volume_id)
        self.clean_name_and_description(snapshot)
        if 'name' in snapshot:
            snapshot['display_name'] = snapshot.pop('name')
        if force:
            new_snapshot = self.volume_api.create_snapshot_force(
                context,
                volume,
                snapshot.get('display_name'),
                snapshot.get('description'),
                **kwargs)
        else:
            # From SNAPSHOT_IN_USE onwards, snapshotting an in-use volume
            # is allowed without the force flag.
            if req_version.matches(mv.SNAPSHOT_IN_USE):
                kwargs['allow_in_use'] = True
            new_snapshot = self.volume_api.create_snapshot(
                context,
                volume,
                snapshot.get('display_name'),
                snapshot.get('description'),
                **kwargs)
        req.cache_db_snapshot(new_snapshot)
        return self._view_builder.detail(req, new_snapshot)

    @validation.schema(schema.update)
    def update(self, req, id, body):
        """Update a snapshot."""
        context = req.environ['cinder.context']
        snapshot_body = body['snapshot']
        self.clean_name_and_description(snapshot_body)
        # Map the public API field names onto the DB field names.
        if 'name' in snapshot_body:
            snapshot_body['display_name'] = snapshot_body.pop('name')
        if 'description' in snapshot_body:
            snapshot_body['display_description'] = snapshot_body.pop(
                'description')
        # Not found exception will be handled at the wsgi level
        snapshot = self.volume_api.get_snapshot(context, id)
        volume_utils.notify_about_snapshot_usage(context, snapshot,
                                                 'update.start')
        self.volume_api.update_snapshot(context, snapshot, snapshot_body)
        snapshot.update(snapshot_body)
        req.cache_db_snapshot(snapshot)
        volume_utils.notify_about_snapshot_usage(context, snapshot,
                                                 'update.end')
        return self._view_builder.detail(req, snapshot)
def create_resource(ext_mgr):
    """Build the WSGI resource for the snapshots controller."""
    controller = SnapshotsController(ext_mgr)
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/types.py 0000664 0000000 0000000 00000013104 15131732575 0022452 0 ustar 00root root 0000000 0000000 # Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume type & volume types extra specs extension."""
import ast
from oslo_log import log as logging
from cinder.api import api_utils
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import types as views_types
from cinder import exception
from cinder.i18n import _
from cinder.policies import type_extra_specs as extra_specs_policy
from cinder.policies import volume_type as type_policy
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class VolumeTypesController(wsgi.Controller):
    """The volume types API controller for the OpenStack API."""

    _view_builder_class = views_types.ViewBuilder

    def index(self, req):
        """Returns the list of volume types."""
        context = req.environ['cinder.context']
        context.authorize(type_policy.GET_ALL_POLICY)
        limited_types = self._get_volume_types(req)
        req.cache_resource(limited_types, name='types')
        return self._view_builder.index(req, limited_types)

    def show(self, req, id):
        """Return a single volume type item.

        The special id 'default' resolves to the configured default
        volume type.
        """
        context = req.environ['cinder.context']
        # get default volume type
        if id is not None and id == 'default':
            vol_type = volume_types.get_default_volume_type(context)
            if not vol_type:
                msg = _("Default volume type can not be found.")
                raise exception.VolumeTypeNotFound(message=msg)
            req.cache_resource(vol_type, name='types')
        else:
            # Not found exception will be handled at wsgi level
            vol_type = volume_types.get_volume_type(context, id)
            req.cache_resource(vol_type, name='types')
        context.authorize(type_policy.GET_POLICY, target_obj=vol_type)
        return self._view_builder.show(req, vol_type)

    @common.process_general_filtering('volume_type')
    def _process_volume_type_filtering(self, context=None, filters=None,
                                       req_version=None):
        """Drop filter options that are not allowed for this context."""
        api_utils.remove_invalid_filter_options(
            context,
            filters,
            self._get_vol_type_filter_options())

    def _get_volume_types(self, req):
        """Helper function that returns a list of type dicts."""
        params = req.params.copy()
        marker, limit, offset = common.get_pagination_params(params)
        sort_keys, sort_dirs = common.get_sort_params(params)
        filters = params
        context = req.environ['cinder.context']
        req_version = req.api_version_request
        if req_version.matches(mv.SUPPORT_VOLUME_TYPE_FILTER):
            self._process_volume_type_filtering(context=context,
                                                filters=filters,
                                                req_version=req_version)
        else:
            api_utils.remove_invalid_filter_options(
                context, filters, self._get_vol_type_filter_options())
        if context.is_admin:
            # Only admin has query access to all volume types
            filters['is_public'] = api_utils._parse_is_public(
                req.params.get('is_public', None))
        else:
            filters['is_public'] = True
        if 'extra_specs' in filters:
            try:
                filters['extra_specs'] = ast.literal_eval(
                    filters['extra_specs'])
            except (ValueError, SyntaxError):
                LOG.debug('Could not evaluate "extra_specs" %s, assuming '
                          'dictionary string.', filters['extra_specs'])
            # Do not allow sensitive extra specs to be used in a filter if
            # the context only allows access to user visible extra specs.
            # Removing the filter would yield inaccurate results, so an
            # empty result is returned because as far as an unauthorized
            # user goes, the list of volume-types meeting their filtering
            # criteria is empty.
            # NOTE: the isinstance() guard protects against the case where
            # literal_eval failed above and 'extra_specs' is still a plain
            # string; calling .keys() on it would raise AttributeError and
            # surface as an HTTP 500.
            if (isinstance(filters['extra_specs'], dict)
                    and not context.authorize(
                        extra_specs_policy.READ_SENSITIVE_POLICY,
                        fatal=False)):
                for k in filters['extra_specs'].keys():
                    if k not in extra_specs_policy.USER_VISIBLE_EXTRA_SPECS:
                        return []
        limited_types = volume_types.get_all_types(context,
                                                   filters=filters,
                                                   marker=marker, limit=limit,
                                                   sort_keys=sort_keys,
                                                   sort_dirs=sort_dirs,
                                                   offset=offset,
                                                   list_result=True)
        return limited_types

    def _get_vol_type_filter_options(self):
        """Return volume type search options allowed by non-admin."""
        return ['is_public']
def create_resource():
    """Build the WSGI resource for the volume types controller."""
    controller = VolumeTypesController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/ 0000775 0000000 0000000 00000000000 15131732575 0022072 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0024171 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/attachments.py 0000664 0000000 0000000 00000004003 15131732575 0024754 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
class ViewBuilder(object):
    """Model an attachment API response as a python dictionary."""

    _collection_name = "attachments"

    @staticmethod
    def _normalize(date):
        """Normalize a datetime for output; empty string when unset."""
        return timeutils.normalize_time(date) if date else ''

    @staticmethod
    def summary(attachment, flat=False):
        """Non detailed view of an attachment."""
        view = {
            'id': attachment.id,
            'status': attachment.attach_status,
            'instance': attachment.instance_uuid,
            'volume_id': attachment.volume_id, }
        return view if flat else {'attachment': view}

    @classmethod
    def detail(cls, attachment, flat=False):
        """Detailed view of an attachment."""
        view = cls.summary(attachment, flat=True)
        view['attached_at'] = cls._normalize(attachment.attach_time)
        view['detached_at'] = cls._normalize(attachment.detach_time)
        view['attach_mode'] = attachment.attach_mode
        view['connection_info'] = attachment.connection_info
        return view if flat else {'attachment': view}

    @classmethod
    def list(cls, attachments, detail=False):
        """Build a view of a list of attachments."""
        render = cls.detail if detail else cls.summary
        return {'attachments': [render(attachment, flat=True)
                                for attachment in attachments]}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/backups.py 0000664 0000000 0000000 00000003240 15131732575 0024073 0 ustar 00root root 0000000 0000000 # Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import microversions as mv
from cinder.api.views import backups as views_v2
from cinder.common import constants as cinder_constants
class ViewBuilder(views_v2.ViewBuilder):
    """Model a backups API V3 response as a python dictionary."""

    def detail(self, request, backup):
        """Detailed view of a single backup."""
        backup_ref = super().detail(request, backup)
        req_version = request.api_version_request
        # Metadata only appears in the detail view from BACKUP_METADATA
        # onwards.
        if req_version.matches(mv.BACKUP_METADATA):
            backup_ref['backup']['metadata'] = backup.metadata
        # The encryption key id is exposed from
        # ENCRYPTION_KEY_ID_IN_DETAILS onwards, except when it is the
        # FIXED_KEY_ID sentinel.
        if req_version.matches(mv.ENCRYPTION_KEY_ID_IN_DETAILS, None):
            key_id = backup.get('encryption_key_id', None)
            if key_id and key_id != cinder_constants.FIXED_KEY_ID:
                backup_ref['backup']['encryption_key_id'] = key_id
        return backup_ref
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/clusters.py 0000664 0000000 0000000 00000005077 15131732575 0024321 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
class ViewBuilder(object):
    """Map Cluster into dicts for API responses."""

    @staticmethod
    def _normalize(date):
        """Normalize a datetime for output; empty string when unset."""
        return timeutils.normalize_time(date) if date else ''

    @staticmethod
    def summary(cluster, replication_data=False, flat=False):
        """Generic, non-detailed view of a cluster."""
        view = {
            'name': cluster.name,
            'binary': cluster.binary,
            'state': 'up' if cluster.is_up else 'down',
            'status': 'disabled' if cluster.disabled else 'enabled',
            'replication_status': cluster.replication_status,
        }
        # Replication details are opt-in.
        if not replication_data:
            del view['replication_status']
        if flat:
            return view
        return {'cluster': view}

    @classmethod
    def detail(cls, cluster, replication_data=False, flat=False):
        """Detailed view of a cluster."""
        view = cls.summary(cluster, flat=True)
        view['num_hosts'] = cluster.num_hosts
        view['num_down_hosts'] = cluster.num_down_hosts
        view['last_heartbeat'] = cls._normalize(cluster.last_heartbeat)
        view['created_at'] = cls._normalize(cluster.created_at)
        view['updated_at'] = cls._normalize(cluster.updated_at)
        view['disabled_reason'] = cluster.disabled_reason
        view['replication_status'] = cluster.replication_status
        view['frozen'] = cluster.frozen
        view['active_backend_id'] = cluster.active_backend_id
        # Replication details are opt-in.
        if not replication_data:
            for field in ('replication_status', 'frozen',
                          'active_backend_id'):
                del view[field]
        if flat:
            return view
        return {'cluster': view}

    @classmethod
    def list(cls, clusters, detail=False, replication_data=False):
        """Render a list of clusters at the requested level of detail."""
        render = cls.detail if detail else cls.summary
        return {'clusters': [render(cluster, replication_data, flat=True)
                             for cluster in clusters]}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/default_types.py 0000664 0000000 0000000 00000004365 15131732575 0025324 0 ustar 00root root 0000000 0000000 # Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ViewBuilder(object):
    """Model default type API response as a python dictionary."""

    _collection_name = "default_types"

    def _convert_to_dict(self, default):
        """Map a default-type object onto its two response fields."""
        return {'project_id': default.project_id,
                'volume_type_id': default.volume_type_id}

    def create(self, default_type):
        """Detailed view of a default type when set."""
        return {'default_type': self._convert_to_dict(default_type)}

    def index(self, default_types):
        """Build a view of a list of default types.

        Each entry holds a project_id / volume_type_id pair, e.g.::

            {"default_types": [
                {"project_id": "...", "volume_type_id": "..."},
                ...
            ]}
        """
        listing = [self._convert_to_dict(default_type)
                   for default_type in default_types]
        return {'default_types': listing}

    def detail(self, default_type):
        """Build a view of a single default type, e.g.::

            {"default_type":
                {"project_id": "...", "volume_type_id": "..."}}
        """
        return {'default_type': self._convert_to_dict(default_type)}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/group_snapshots.py 0000664 0000000 0000000 00000006621 15131732575 0025707 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
from cinder.api import microversions as mv
from cinder.policies import group_snapshots as policy
class ViewBuilder(common.ViewBuilder):
    """Model group_snapshot API responses as a python dictionary."""

    _collection_name = "group_snapshots"

    def __init__(self):
        """Initialize view builder."""
        super().__init__()

    def summary_list(self, request, group_snapshots):
        """Show a list of group_snapshots without many details."""
        return self._list_view(self.summary, request, group_snapshots)

    def detail_list(self, request, group_snapshots):
        """Detailed view of a list of group_snapshots ."""
        return self._list_view(self.detail, request, group_snapshots)

    def summary(self, request, group_snapshot):
        """Generic, non-detailed view of a group_snapshot."""
        # NOTE(xyang): group_type_id is added for migrating CGs
        # to generic volume groups
        return {
            'group_snapshot': {
                'id': group_snapshot.id,
                'name': group_snapshot.name,
                'group_type_id': group_snapshot.group_type_id,
            }
        }

    def detail(self, request, group_snapshot):
        """Detailed view of a single group_snapshot."""
        snapshot_view = {
            'id': group_snapshot.id,
            'group_id': group_snapshot.group_id,
            'group_type_id': group_snapshot.group_type_id,
            'status': group_snapshot.status,
            'created_at': group_snapshot.created_at,
            'name': group_snapshot.name,
            'description': group_snapshot.description
        }
        req_version = request.api_version_request
        context = request.environ['cinder.context']
        # project_id is policy-gated and only exposed from
        # GROUP_GROUPSNAPSHOT_PROJECT_ID onwards.
        if req_version.matches(mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, None):
            if context.authorize(policy.GROUP_SNAPSHOT_ATTRIBUTES_POLICY,
                                 fatal=False):
                snapshot_view['project_id'] = group_snapshot.project_id
        return {'group_snapshot': snapshot_view}

    def _list_view(self, func, request, group_snapshots):
        """Provide a view for a list of group_snapshots."""
        listing = [func(request, group_snapshot)['group_snapshot']
                   for group_snapshot in group_snapshots]
        links = self._get_collection_links(
            request, listing, self._collection_name)
        result = {'group_snapshots': listing}
        if links:
            result['group_snapshot_links'] = links
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/group_types.py 0000664 0000000 0000000 00000003546 15131732575 0025034 0 ustar 00root root 0000000 0000000 # Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
from cinder.policies import group_types as policy
class ViewBuilder(common.ViewBuilder):
    """Render group type API responses."""

    def show(self, request, group_type, brief=False):
        """Trim away extraneous group type attributes."""
        context = request.environ['cinder.context']
        trimmed = {
            'id': group_type.get('id'),
            'name': group_type.get('name'),
            'description': group_type.get('description'),
            'is_public': group_type.get('is_public'),
        }
        # group_specs are only shown to callers passing the access policy.
        if context.authorize(policy.SHOW_ACCESS_POLICY, fatal=False):
            trimmed['group_specs'] = group_type.get('group_specs')
        if brief:
            return trimmed
        return {'group_type': trimmed}

    def index(self, request, group_types):
        """Index over trimmed group types."""
        trimmed_types = [self.show(request, group_type, True)
                         for group_type in group_types]
        links = self._get_collection_links(request, group_types,
                                           'group_types')
        result = {'group_types': trimmed_types}
        if links:
            result['group_type_links'] = links
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/groups.py 0000664 0000000 0000000 00000007540 15131732575 0023771 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
from cinder.api import microversions as mv
from cinder.policies import groups as policy
from cinder import utils
class ViewBuilder(common.ViewBuilder):
    """Model group API responses as a python dictionary."""

    _collection_name = "groups"

    def __init__(self):
        """Initialize view builder."""
        super().__init__()

    def summary_list(self, request, groups):
        """Show a list of groups without many details."""
        return self._list_view(self.summary, request, groups)

    def detail_list(self, request, groups):
        """Detailed view of a list of groups ."""
        return self._list_view(self.detail, request, groups)

    def summary(self, request, group):
        """Generic, non-detailed view of a group."""
        return {
            'group': {
                'id': group.id,
                'name': group.name
            }
        }

    def detail(self, request, group):
        """Detailed view of a single group."""
        context = request.environ['cinder.context']
        group_view = {
            'id': group.id,
            'status': group.status,
            'availability_zone': group.availability_zone,
            'created_at': group.created_at,
            'name': group.name,
            'description': group.description,
            'group_type': group.group_type_id,
            'volume_types': [v_type.id for v_type in group.volume_types],
        }
        req_version = request.api_version_request
        # group_snapshot_id and source_group_id only exist from
        # GROUP_SNAPSHOTS onwards.
        if req_version.matches(mv.GROUP_SNAPSHOTS, None):
            group_view['group_snapshot_id'] = group.group_snapshot_id
            group_view['source_group_id'] = group.source_group_id
        # The volume id listing is opt-in via the list_volume query
        # parameter and only available from GROUP_VOLUME_LIST onwards.
        if req_version.matches(mv.GROUP_VOLUME_LIST, None):
            if utils.get_bool_param('list_volume', request.params):
                group_view['volumes'] = [vol.id for vol in group.volumes]
        # replication_status is exposed from GROUP_REPLICATION onwards.
        if req_version.matches(mv.GROUP_REPLICATION, None):
            group_view['replication_status'] = group.replication_status
        # project_id is policy-gated and only exposed from
        # GROUP_GROUPSNAPSHOT_PROJECT_ID onwards.
        if req_version.matches(mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, None):
            if context.authorize(policy.GROUP_ATTRIBUTES_POLICY,
                                 fatal=False):
                group_view['project_id'] = group.project_id
        return {'group': group_view}

    def _list_view(self, func, request, groups):
        """Provide a view for a list of groups."""
        listing = [func(request, group)['group'] for group in groups]
        links = self._get_collection_links(request,
                                           groups,
                                           self._collection_name)
        result = {'groups': listing}
        if links:
            result['group_links'] = links
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/messages.py 0000664 0000000 0000000 00000005462 15131732575 0024262 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""

    _collection_name = "messages"

    def index(self, request, messages, message_count=None):
        """Show a list of messages."""
        return self._list_view(self.detail, request, messages, message_count)

    def detail(self, request, message):
        """Detailed view of a single message."""
        view = {
            'id': message.get('id'),
            'event_id': message.get('event_id'),
            'user_message': message.get('user_message'),
            'message_level': message.get('message_level'),
            'created_at': message.get('created_at'),
            'guaranteed_until': message.get('expires_at'),
            'request_id': message.get('request_id'),
            'links': self._get_links(request, message['id']),
        }
        # The resource fields are optional and omitted when unset.
        for attr in ('resource_type', 'resource_uuid'):
            if message.get(attr):
                view[attr] = message.get(attr)
        return {'message': view}

    def _list_view(self, func, request, messages, message_count=None,
                   coll_name=_collection_name):
        """Provide a view for a list of messages.

        :param func: Function used to format the message data
        :param request: API request
        :param messages: List of messages in dictionary format
        :param message_count: Length of the original list of messages
        :param coll_name: Name of collection, used to generate the next link
                          for a pagination query
        :returns: message data in dictionary format
        """
        items = [func(request, msg)['message'] for msg in messages]
        links = self._get_collection_links(request,
                                           messages,
                                           coll_name,
                                           message_count)
        result = dict(messages=items)
        if links:
            result['messages_links'] = links
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/resource_filters.py 0000664 0000000 0000000 00000002244 15131732575 0026025 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ViewBuilder(object):
    """Model a resource-filters API response as a python dictionary."""

    _collection_name = "resource_filters"

    @classmethod
    def list(cls, filters):
        """Build a view of a list of resource filters.

        .. code-block:: json

            {
                "resource_filters": [{
                    "resource": "resource_1",
                    "filters": ["filter1", "filter2", "filter3"]
                }]
            }
        """
        views = [{'resource': resource, 'filters': filter_names}
                 for resource, filter_names in filters.items()]
        return {'resource_filters': views}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/snapshots.py 0000664 0000000 0000000 00000003056 15131732575 0024472 0 ustar 00root root 0000000 0000000 # Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import microversions as mv
from cinder.api.views import snapshots as views_v2
class ViewBuilder(views_v2.ViewBuilder):
    """Model a snapshots API V3 response as a python dictionary."""

    def detail(self, request, snapshot):
        """Detailed view of a single snapshot."""
        snapshot_ref = super().detail(request, snapshot)
        snap_view = snapshot_ref['snapshot']
        req_version = request.api_version_request
        # group_snapshot_id only appears from GROUP_SNAPSHOTS on.
        if req_version.matches(mv.GROUP_SNAPSHOTS, None):
            snap_view['group_snapshot_id'] = snapshot.get('group_snapshot_id')
        # user_id only appears from SNAPSHOT_LIST_USER_ID on.
        if req_version.matches(mv.SNAPSHOT_LIST_USER_ID, None):
            snap_view['user_id'] = snapshot.get('user_id')
        if req_version.matches(mv.USE_QUOTA):
            snap_view['consumes_quota'] = snapshot.get('use_quota')
        return snapshot_ref
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/types.py 0000664 0000000 0000000 00000005166 15131732575 0023620 0 ustar 00root root 0000000 0000000 # Copyright 2012 Red Hat, Inc.
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
from cinder.policies import type_extra_specs as extra_specs_policy
from cinder.policies import volume_type as policy
class ViewBuilder(common.ViewBuilder):
    """Model volume-type API responses as python dictionaries."""

    def show(self, request, volume_type, brief=False):
        """Trim away extraneous volume type attributes."""
        context = request.environ['cinder.context']
        trimmed = {
            'id': volume_type.get('id'),
            'name': volume_type.get('name'),
            'is_public': volume_type.get('is_public'),
            'description': volume_type.get('description'),
        }
        if context.authorize(policy.EXTRA_SPEC_POLICY, fatal=False):
            extra_specs = volume_type.get('extra_specs', {})
            if context.authorize(extra_specs_policy.READ_SENSITIVE_POLICY,
                                 fatal=False):
                visible_specs = extra_specs
            else:
                # Limit the response to contain only user visible specs.
                visible_specs = {
                    spec: extra_specs[spec]
                    for spec in extra_specs_policy.USER_VISIBLE_EXTRA_SPECS
                    if spec in extra_specs}
            trimmed['extra_specs'] = visible_specs
        if context.authorize(policy.QOS_POLICY, fatal=False):
            trimmed['qos_specs_id'] = volume_type.get('qos_specs_id')
        return trimmed if brief else dict(volume_type=trimmed)

    def index(self, request, volume_types):
        """Index over trimmed volume types."""
        types_list = [self.show(request, vtype, True)
                      for vtype in volume_types]
        links = self._get_collection_links(request, volume_types, 'types')
        result = dict(volume_types=types_list)
        if links:
            result['volume_type_links'] = links
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/volumes.py 0000664 0000000 0000000 00000025160 15131732575 0024142 0 ustar 00root root 0000000 0000000 # Copyright 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
from cinder.api import microversions as mv
from cinder.common import constants as cinder_constants
from cinder import group as group_api
from cinder.objects import fields
from cinder.volume import group_types
class ViewBuilder(common.ViewBuilder):
    """Model a volumes API V3 response as a python dictionary."""
    _collection_name = "volumes"
    def summary_list(self, request, volumes, volume_count=None):
        """Show a list of volumes without many details."""
        return self._list_view(self.summary, request, volumes,
                               volume_count)
    def detail_list(self, request, volumes, volume_count=None):
        """Detailed view of a list of volumes."""
        return self._list_view(self.detail, request, volumes,
                               volume_count,
                               self._collection_name + '/detail')
    def summary(self, request, volume):
        """Generic, non-detailed view of a volume."""
        return {
            'volume': {
                'id': volume['id'],
                'name': volume['display_name'],
                'links': self._get_links(request,
                                         volume['id']),
            },
        }
    def quick_summary(self, volume_count, volume_size,
                      all_distinct_metadata=None):
        """View of volumes summary.
        It includes number of volumes, size of volumes and all distinct
        metadata of volumes.
        """
        summary = {
            'volume-summary': {
                'total_count': volume_count,
                'total_size': volume_size
            }
        }
        if all_distinct_metadata is not None:
            summary['volume-summary']['metadata'] = all_distinct_metadata
        return summary
    def _get_volume_status(self, volume):
        """Map internal manage-flow statuses to the legacy public ones."""
        # NOTE(wanghao): for fixing bug 1504007, we introduce 'managing',
        # 'error_managing' and 'error_managing_deleting' status into managing
        # process, but still expose 'creating' and 'error' and 'deleting'
        # status to user for API compatibility.
        status_map = {
            'managing': 'creating',
            'error_managing': 'error',
            'error_managing_deleting': 'deleting',
        }
        vol_status = volume.get('status')
        return status_map.get(vol_status, vol_status)
    def _get_volume_metadata(self, volume):
        """Retrieve the metadata of the volume object."""
        return volume.metadata
    def _get_volume_type(self, request, volume):
        """Retrieve the type of the volume object.
        Retrieves the volume type name for microversion 3.63.
        Otherwise, it uses either the name or ID.
        """
        req_version = request.api_version_request
        # From VOLUME_TYPE_ID_IN_VOLUME_DETAIL on, only the name (or None)
        # is returned here; the id is added separately in detail().
        if req_version.matches(mv.VOLUME_TYPE_ID_IN_VOLUME_DETAIL):
            if volume.get('volume_type'):
                return volume['volume_type']['name']
            return None
        if volume['volume_type_id'] and volume.get('volume_type'):
            return volume['volume_type']['name']
        else:
            return volume['volume_type_id']
    def _is_volume_encrypted(self, volume):
        """Determine if volume is encrypted."""
        return volume.get('encryption_key_id') is not None
    def _get_attachments(self, volume, ctxt):
        """Retrieve the attachments of the volume object."""
        attachments = []
        # Only attachments that have fully reached ATTACHED state are shown.
        for attachment in volume.volume_attachment:
            if (
                attachment.get('attach_status') ==
                fields.VolumeAttachStatus.ATTACHED
            ):
                a = {'id': attachment.get('volume_id'),
                     'attachment_id': attachment.get('id'),
                     'volume_id': attachment.get('volume_id'),
                     'server_id': attachment.get('instance_uuid'),
                     'host_name': None,
                     'device': attachment.get('mountpoint'),
                     'attached_at': attachment.get('attach_time'),
                     }
                # When glance is cinder backed, we require the
                # host_name to determine when to detach a multiattach
                # volume. Glance always uses service credentials to
                # request Cinder so we are not exposing the host value
                # to end users (non-admin).
                if ctxt.is_admin or 'service' in ctxt.roles:
                    a['host_name'] = attachment.get('attached_host')
                attachments.append(a)
        return attachments
    def legacy_detail(self, request, volume):
        """Detailed view of a single volume."""
        volume_ref = {
            'volume': {
                'id': volume.get('id'),
                'status': self._get_volume_status(volume),
                'size': volume.get('size'),
                'availability_zone': volume.get('availability_zone'),
                'created_at': volume.get('created_at'),
                'updated_at': volume.get('updated_at'),
                'name': volume.get('display_name'),
                'description': volume.get('display_description'),
                'volume_type': self._get_volume_type(request, volume),
                'snapshot_id': volume.get('snapshot_id'),
                'source_volid': volume.get('source_volid'),
                'metadata': self._get_volume_metadata(volume),
                'links': self._get_links(request, volume['id']),
                'user_id': volume.get('user_id'),
                'bootable': str(volume.get('bootable')).lower(),
                'encrypted': self._is_volume_encrypted(volume),
                'replication_status': volume.get('replication_status'),
                'consistencygroup_id': volume.get('consistencygroup_id'),
                'multiattach': volume.get('multiattach'),
            }
        }
        ctxt = request.environ['cinder.context']
        attachments = self._get_attachments(volume, ctxt)
        volume_ref['volume']['attachments'] = attachments
        # migration_status is an admin-only field.
        if ctxt.is_admin:
            volume_ref['volume']['migration_status'] = (
                volume.get('migration_status'))
        # NOTE(xyang): Display group_id as consistencygroup_id in detailed
        # view of the volume if group is converted from cg.
        group_id = volume.get('group_id')
        if group_id is not None:
            # Not found exception will be handled at the wsgi level
            grp = group_api.API().get(ctxt, group_id)
            cgsnap_type = group_types.get_default_cgsnapshot_type()
            if grp.group_type_id == cgsnap_type['id']:
                volume_ref['volume']['consistencygroup_id'] = group_id
        return volume_ref
    def detail(self, request, volume):
        """Detailed view of a single volume."""
        volume_ref = self.legacy_detail(request, volume)
        req_version = request.api_version_request
        # Add group_id if min version is greater than or equal to GROUP_VOLUME.
        if req_version.matches(mv.GROUP_VOLUME, None):
            volume_ref['volume']['group_id'] = volume.get('group_id')
        # Add provider_id if min version is greater than or equal to
        # VOLUME_DETAIL_PROVIDER_ID for admin.
        if (request.environ['cinder.context'].is_admin and
                req_version.matches(mv.VOLUME_DETAIL_PROVIDER_ID, None)):
            volume_ref['volume']['provider_id'] = volume.get('provider_id')
        if req_version.matches(
                mv.VOLUME_SHARED_TARGETS_AND_SERVICE_FIELDS, None):
            # For microversion 3.69 or higher it is acceptable to be null
            # but for earlier versions we convert None to True
            shared = volume.get('shared_targets', False)
            if (not req_version.matches(mv.SHARED_TARGETS_TRISTATE, None)
                    and shared is None):
                shared = True
            volume_ref['volume']['shared_targets'] = shared
            volume_ref['volume']['service_uuid'] = volume.get(
                'service_uuid', None)
        if (request.environ['cinder.context'].is_admin and req_version.matches(
                mv.VOLUME_CLUSTER_NAME, None)):
            volume_ref['volume']['cluster_name'] = volume.get(
                'cluster_name', None)
        # Expose the volume type id in addition to the name from this
        # microversion on.
        if req_version.matches(mv.VOLUME_TYPE_ID_IN_VOLUME_DETAIL, None):
            volume_ref[
                'volume']["volume_type_id"] = volume['volume_type'].get('id')
        # The fixed-key id is never exposed to users.
        if req_version.matches(mv.ENCRYPTION_KEY_ID_IN_DETAILS, None):
            encryption_key_id = volume.get('encryption_key_id', None)
            if (encryption_key_id and
                    encryption_key_id != cinder_constants.FIXED_KEY_ID):
                volume_ref['volume']['encryption_key_id'] = encryption_key_id
        if req_version.matches(mv.USE_QUOTA):
            volume_ref['volume']['consumes_quota'] = volume.get('use_quota')
        return volume_ref
    def _list_view(self, func, request, volumes, volume_count,
                   coll_name=_collection_name):
        """Provide a view for a list of volumes.
        :param func: Function used to format the volume data
        :param request: API request
        :param volumes: List of volumes in dictionary format
        :param volume_count: Length of the original list of volumes
        :param coll_name: Name of collection, used to generate the next link
                          for a pagination query
        :returns: Volume data in dictionary format
        """
        volumes_list = [func(request, volume)['volume'] for volume in volumes]
        volumes_links = self._get_collection_links(request,
                                                   volumes,
                                                   coll_name,
                                                   volume_count)
        volumes_dict = {"volumes": volumes_list}
        if volumes_links:
            volumes_dict['volumes_links'] = volumes_links
        req_version = request.api_version_request
        # The total count is only included when the microversion supports it
        # and the caller actually supplied one.
        if req_version.matches(
                mv.SUPPORT_COUNT_INFO, None) and volume_count is not None:
            volumes_dict['count'] = volume_count
        return volumes_dict
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/views/workers.py 0000664 0000000 0000000 00000001646 15131732575 0024147 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ViewBuilder(object):
    """Map Cluster into dicts for API responses."""

    _collection_name = 'workers'

    @classmethod
    def service_list(cls, services):
        """Build a summary dict (id/host/binary/cluster_name) per service."""
        keys = ('id', 'host', 'binary', 'cluster_name')
        return [{key: getattr(svc, key) for key in keys} for svc in services]
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/volume_manage.py 0000664 0000000 0000000 00000003240 15131732575 0024125 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from cinder.api.contrib import volume_manage
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_manage as schema
from cinder.api.v3 import resource_common_manage as common
from cinder.api import validation
class VolumeManageController(
    common.ManageResource, volume_manage.VolumeManageController
):
    """Manage-existing controller bound to the volume resource type."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._set_resource_type('volume')

    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.volume_manage_create, mv.BASE_VERSION,
                       mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER))
    @validation.schema(schema.volume_manage_create_v316,
                       mv.VOLUME_MIGRATE_CLUSTER)
    def create(self, req, body):
        """Manage an existing volume.

        Rejects requests below the MANAGE_EXISTING_LIST microversion before
        delegating to the contrib controller.
        """
        self._ensure_min_version(req, mv.MANAGE_EXISTING_LIST)
        return super().create(req, body=body)
def create_resource():
    """Create the WSGI resource wrapping the volume manage controller."""
    controller = VolumeManageController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/volume_metadata.py 0000664 0000000 0000000 00000014773 15131732575 0024472 0 ustar 00root root 0000000 0000000 # Copyright 2016 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume metadata V3 api."""
import hashlib
from http import HTTPStatus
from oslo_serialization import jsonutils
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_metadata as schema
from cinder.api import validation
from cinder import exception
from cinder.i18n import _
from cinder import volume
class VolumeMetadataController(wsgi.Controller):
    """The volume metadata API controller for the OpenStack API."""
    def __init__(self):
        self.volume_api = volume.API()
        super().__init__()
    def _get_metadata(self, context, volume_id):
        """Return only the metadata dict for the given volume."""
        # The metadata is at the second position of the tuple returned
        # from _get_volume_and_metadata
        return self._get_volume_and_metadata(context, volume_id)[1]
    def _get_volume_and_metadata(self, context, volume_id):
        """Return a (volume, metadata) tuple for the given volume id."""
        # Not found exception will be handled at the wsgi level
        volume = self.volume_api.get(context, volume_id)
        meta = self.volume_api.get_volume_metadata(context, volume)
        return (volume, meta)
    def _validate_etag(self, req, volume_id):
        """Check the request's If-Match etag against current metadata.

        Requests without an If-Match header always pass. The etag is the
        MD5 hex digest (non-security checksum, hence usedforsecurity=False)
        of the JSON-serialized metadata — the same value index() emits.
        """
        if not req.if_match:
            return True
        context = req.environ['cinder.context']
        metadata = self._get_metadata(context, volume_id)
        data = jsonutils.dumps({"metadata": metadata})
        data = data.encode('utf-8')
        checksum = hashlib.md5(data, usedforsecurity=False).hexdigest()
        return checksum in req.if_match.etags
    @wsgi.extends
    def index(self, req, volume_id):
        """Return the volume's metadata, with an Etag header from ETAGS on."""
        context = req.environ['cinder.context']
        metadata = {'metadata': self._get_metadata(context, volume_id)}
        if req.api_version_request.matches(mv.ETAGS):
            data = jsonutils.dumps(metadata)
            data = data.encode('utf-8')
            resp = webob.Response()
            resp.headers['Etag'] = hashlib.md5(
                data, usedforsecurity=False).hexdigest()
            resp.body = data
            return resp
        return metadata
    @validation.schema(schema.create)
    def create(self, req, volume_id, body):
        """Create metadata items for the volume; returns the merged set."""
        context = req.environ['cinder.context']
        metadata = body['metadata']
        new_metadata = self._update_volume_metadata(
            context, volume_id, metadata, delete=False, use_create=True)
        return {'metadata': new_metadata}
    def _update_volume_metadata(self, context, volume_id, metadata,
                                delete=False, use_create=False):
        """Create or update metadata, translating API errors to HTTP ones.

        :param delete: when True, keys absent from ``metadata`` are removed
        :param use_create: dispatch to create_volume_metadata instead of
                           update_volume_metadata
        """
        try:
            volume = self.volume_api.get(context, volume_id)
            if use_create:
                return self.volume_api.create_volume_metadata(context, volume,
                                                              metadata)
            else:
                return self.volume_api.update_volume_metadata(
                    context, volume, metadata, delete,
                    meta_type=common.METADATA_TYPES.user)
        # Not found exception will be handled at the wsgi level
        except (ValueError, AttributeError):
            msg = _("Malformed request body")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.InvalidVolumeMetadata as error:
            raise webob.exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidVolumeMetadataSize as error:
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg)
    @wsgi.extends
    @validation.schema(schema.update)
    def update(self, req, volume_id, id, body):
        """Update a single metadata item; honors If-Match from ETAGS on."""
        if req.api_version_request.matches(mv.ETAGS):
            if not self._validate_etag(req, volume_id):
                return webob.Response(
                    status_int=HTTPStatus.PRECONDITION_FAILED)
        meta_item = body['meta']
        # The body must contain exactly the one key named in the URI.
        if id not in meta_item:
            expl = _('Request body and URI mismatch')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        if len(meta_item) > 1:
            expl = _('Request body contains too many items')
            raise webob.exc.HTTPBadRequest(explanation=expl)
        context = req.environ['cinder.context']
        self._update_volume_metadata(
            context, volume_id, meta_item, delete=False)
        return {'meta': meta_item}
    @wsgi.extends
    @validation.schema(schema.create)
    def update_all(self, req, volume_id, body):
        """Replace the whole metadata set; honors If-Match from ETAGS on."""
        if req.api_version_request.matches(mv.ETAGS):
            if not self._validate_etag(req, volume_id):
                return webob.Response(
                    status_int=HTTPStatus.PRECONDITION_FAILED)
        metadata = body['metadata']
        context = req.environ['cinder.context']
        # delete=True makes this a full replacement: absent keys are removed.
        new_metadata = self._update_volume_metadata(
            context, volume_id, metadata, delete=True)
        return {'metadata': new_metadata}
    def show(self, req, volume_id, id):
        """Return a single metadata item."""
        context = req.environ['cinder.context']
        data = self._get_metadata(context, volume_id)
        try:
            return {'meta': {id: data[id]}}
        except KeyError:
            raise exception.VolumeMetadataNotFound(volume_id=volume_id,
                                                   metadata_key=id)
    def delete(self, req, volume_id, id):
        """Deletes an existing metadata."""
        context = req.environ['cinder.context']
        volume, metadata = self._get_volume_and_metadata(context, volume_id)
        if id not in metadata:
            raise exception.VolumeMetadataNotFound(volume_id=volume_id,
                                                   metadata_key=id)
        # Not found exception will be handled at the wsgi level
        self.volume_api.delete_volume_metadata(
            context,
            volume,
            id,
            meta_type=common.METADATA_TYPES.user)
        return webob.Response(status_int=HTTPStatus.OK)
def create_resource():
    """Create the WSGI resource wrapping the volume metadata controller."""
    controller = VolumeMetadataController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/volume_transfer.py 0000664 0000000 0000000 00000011253 15131732575 0024524 0 ustar 00root root 0000000 0000000 # Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import strutils
from webob import exc
from cinder.api import common
from cinder.api.contrib import volume_transfer
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volume_transfer as schema
from cinder.api import validation
from cinder import exception
LOG = logging.getLogger(__name__)
class VolumeTransferController(volume_transfer.VolumeTransferController):
    """The transfer API controller for the OpenStack API V3."""
    def _get_transfers(self, req, is_detail):
        """Returns a list of transfers, transformed through view builder."""
        context = req.environ['cinder.context']
        req_version = req.api_version_request
        params = req.params.copy()
        marker = limit = offset = None
        # Pagination/sort parameters are only honored from
        # SUPPORT_TRANSFER_PAGINATION on.
        if req_version.matches(mv.SUPPORT_TRANSFER_PAGINATION):
            marker, limit, offset = common.get_pagination_params(params)
            sort_keys, sort_dirs = common.get_sort_params(params)
        else:
            # NOTE(yikun): After microversion SUPPORT_TRANSFER_PAGINATION,
            # transfers list api use the ['created_at'], ['asc']
            # as default order, but we should keep the compatible in here.
            sort_keys, sort_dirs = ['created_at', 'id'], ['asc', 'asc']
        filters = params
        # The public filter key 'name' maps to the DB field 'display_name'.
        if 'name' in filters:
            filters['display_name'] = filters.pop('name')
        LOG.debug('Listing volume transfers')
        transfers = self.transfer_api.get_all(context, marker=marker,
                                              limit=limit,
                                              sort_keys=sort_keys,
                                              sort_dirs=sort_dirs,
                                              filters=filters,
                                              offset=offset)
        # NOTE(review): this is the size of the filtered result set BEFORE
        # common.limited() trims it, i.e. the total rather than the page size.
        transfer_count = len(transfers)
        limited_list = common.limited(transfers, req)
        if is_detail:
            transfers = self._view_builder.detail_list(req, limited_list,
                                                       transfer_count)
        else:
            transfers = self._view_builder.summary_list(req, limited_list,
                                                        transfer_count)
        return transfers
    def index(self, req):
        """Returns a summary list of transfers."""
        return self._get_transfers(req, is_detail=False)
    def detail(self, req):
        """Returns a detailed list of transfers."""
        return self._get_transfers(req, is_detail=True)
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.create, mv.BASE_VERSION,
                       mv.get_prior_version(mv.TRANSFER_WITH_SNAPSHOTS))
    @validation.schema(schema.create_v355, mv.TRANSFER_WITH_SNAPSHOTS)
    def create(self, req, body):
        """Create a new volume transfer."""
        LOG.debug('Creating new volume transfer %s', body)
        context = req.environ['cinder.context']
        transfer = body['transfer']
        volume_id = transfer['volume_id']
        name = transfer.get('name', None)
        if name is not None:
            name = name.strip()
        no_snapshots = strutils.bool_from_string(transfer.get('no_snapshots',
                                                              False))
        req_version = req.api_version_request
        # Presumably gates encrypted-volume transfers in the transfer API;
        # only enabled from TRANSFER_ENCRYPTED_VOLUME on.
        allow_encrypted = req_version.matches(mv.TRANSFER_ENCRYPTED_VOLUME)
        LOG.info("Creating transfer of volume %s", volume_id)
        try:
            new_transfer = self.transfer_api.create(
                context, volume_id, name,
                no_snapshots=no_snapshots, allow_encrypted=allow_encrypted)
        # Not found exception will be handled at the wsgi level
        except exception.Invalid as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        transfer = self._view_builder.create(req,
                                             dict(new_transfer))
        return transfer
def create_resource():
    """Create the WSGI resource wrapping the volume transfer controller."""
    controller = VolumeTransferController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/volumes.py 0000664 0000000 0000000 00000052065 15131732575 0023011 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes V3 api."""
from http import HTTPStatus
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
from webob import exc
from cinder.api import api_utils
from cinder.api import common
from cinder.api.contrib import scheduler_hints
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import volumes as schema
from cinder.api.v3.views import volumes as volume_views_v3
from cinder.api import validation
from cinder.backup import api as backup_api
from cinder import exception
from cinder import group as group_api
from cinder.i18n import _
from cinder.image import glance
from cinder import objects
from cinder.policies import volumes as policy
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
# Pairs of (<last microversion WITHOUT the filter>, <filter name>).
# _process_volume_filtering strips a filter from the request when the
# request's version is at or below that prior version, since the filter
# only became valid in the following microversion.
_MV_ADDED_FILTERS = (
    (mv.get_prior_version(mv.VOLUME_LIST_GLANCE_METADATA),
     'glance_metadata'),
    (mv.get_prior_version(mv.VOLUME_LIST_GROUP), 'group_id'),
    (mv.get_prior_version(mv.VOLUME_TIME_COMPARISON_FILTER), 'created_at'),
    (mv.get_prior_version(mv.VOLUME_TIME_COMPARISON_FILTER), 'updated_at'),
    # REST API receives consumes_quota, but process_general_filtering
    # transforms it into use_quota
    (mv.get_prior_version(mv.USE_QUOTA), 'use_quota'),
)
class VolumeController(wsgi.Controller):
    """The Volumes API controller for the OpenStack API V3."""
    _view_builder_class = volume_views_v3.ViewBuilder
    def __init__(self, ext_mgr=None):
        """Set up the API handles used by the controller actions.

        :param ext_mgr: optional extension manager, stored for later use
        """
        self.volume_api = cinder_volume.API()
        self.group_api = group_api.API()
        self.backup_api = backup_api.API()
        self.ext_mgr = ext_mgr
        super().__init__()
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['cinder.context']
req_version = req.api_version_request
cascade = utils.get_bool_param('cascade', req.params)
force = False
params = ""
if req_version.matches(mv.VOLUME_DELETE_FORCE):
force = utils.get_bool_param('force', req.params)
if cascade or force:
params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade,
'f': force}
LOG.info("Delete volume with id: %(id)s %(params)s",
{'id': id, 'params': params}, context=context)
volume = self.volume_api.get(context, id)
if force:
context.authorize(policy.FORCE_DELETE_POLICY, target_obj=volume)
self.volume_api.delete(context, volume,
cascade=cascade,
force=force)
return webob.Response(status_int=HTTPStatus.ACCEPTED)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
vol = self.volume_api.get(context, id, viewable_admin_meta=True)
req.cache_db_volume(vol)
api_utils.add_visible_admin_metadata(vol)
return self._view_builder.detail(req, vol)
def _get_volume_filter_options(self):
"""Return volume search options allowed by non-admin."""
return common.get_enabled_resource_filters('volume').get('volume', [])
@common.process_general_filtering('volume')
def _process_volume_filtering(self, context=None, filters=None,
req_version=None):
for version, field in _MV_ADDED_FILTERS:
if req_version.matches(None, version):
filters.pop(field, None)
api_utils.remove_invalid_filter_options(
context, filters,
self._get_volume_filter_options())
def _handle_time_comparison_filters(self, filters):
for time_comparison_filter in ['created_at', 'updated_at']:
if time_comparison_filter in filters:
time_filter_dict = {}
comparison_units = filters[time_comparison_filter].split(',')
operators = common.get_time_comparison_operators()
for comparison_unit in comparison_units:
try:
operator_and_time = comparison_unit.split(":")
comparison_operator = operator_and_time[0]
time = ''
for time_str in operator_and_time[1:-1]:
time += time_str + ":"
time += operator_and_time[-1]
if comparison_operator not in operators:
msg = _(
'Invalid %s operator') % comparison_operator
raise exc.HTTPBadRequest(explanation=msg)
except IndexError:
msg = _('Invalid %s value') % time_comparison_filter
raise exc.HTTPBadRequest(explanation=msg)
try:
parsed_time = timeutils.parse_isotime(time)
except ValueError:
msg = _('Invalid %s value') % time
raise exc.HTTPBadRequest(explanation=msg)
time_filter_dict[comparison_operator] = parsed_time
filters[time_comparison_filter] = time_filter_dict
def _get_volumes(self, req, is_detail):
"""Returns a list of volumes, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
params = req.params.copy()
marker, limit, offset = common.get_pagination_params(params)
sort_keys, sort_dirs = common.get_sort_params(params)
filters = params
show_count = False
if req_version.matches(
mv.SUPPORT_COUNT_INFO) and 'with_count' in filters:
show_count = utils.get_bool_param('with_count', filters)
filters.pop('with_count')
self._process_volume_filtering(context=context, filters=filters,
req_version=req_version)
# NOTE: it's 'name' in the REST API, but 'display_name' in the
# database layer, so we need to do this translation
if 'name' in sort_keys:
sort_keys[sort_keys.index('name')] = 'display_name'
if 'name' in filters:
filters['display_name'] = filters.pop('name')
if 'use_quota' in filters:
filters['use_quota'] = utils.get_bool_param('use_quota', filters)
self._handle_time_comparison_filters(filters)
strict = req.api_version_request.matches(
mv.VOLUME_LIST_BOOTABLE, None)
self.volume_api.check_volume_filters(filters, strict)
volumes = self.volume_api.get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters.copy(),
viewable_admin_meta=True,
offset=offset)
total_count = None
if show_count:
total_count = self.volume_api.calculate_resource_count(
context, 'volume', filters)
for volume in volumes:
api_utils.add_visible_admin_metadata(volume)
req.cache_db_volumes(volumes.objects)
if is_detail:
volumes = self._view_builder.detail_list(
req, volumes, total_count)
else:
volumes = self._view_builder.summary_list(
req, volumes, total_count)
return volumes
def index(self, req):
"""Returns a summary list of volumes."""
return self._get_volumes(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._get_volumes(req, is_detail=True)
@wsgi.Controller.api_version(mv.VOLUME_SUMMARY)
def summary(self, req):
"""Return summary of volumes."""
view_builder_v3 = volume_views_v3.ViewBuilder()
context = req.environ['cinder.context']
filters = req.params.copy()
api_utils.remove_invalid_filter_options(
context,
filters,
self._get_volume_filter_options())
num_vols, sum_size, metadata = self.volume_api.get_volume_summary(
context, filters=filters)
req_version = req.api_version_request
if req_version.matches(mv.VOLUME_SUMMARY_METADATA):
all_distinct_metadata = metadata
else:
all_distinct_metadata = None
return view_builder_v3.quick_summary(num_vols, int(sum_size),
all_distinct_metadata)
@wsgi.response(HTTPStatus.ACCEPTED)
@wsgi.Controller.api_version(mv.VOLUME_REVERT)
@wsgi.action('revert')
@validation.schema(schema.revert)
def revert(self, req, id, body):
"""revert a volume to a snapshot"""
context = req.environ['cinder.context']
snapshot_id = body['revert'].get('snapshot_id')
volume = self.volume_api.get_volume(context, id)
try:
l_snap = volume.get_latest_snapshot()
except exception.VolumeSnapshotNotFound:
msg = _("Volume %s doesn't have any snapshots.")
raise exc.HTTPBadRequest(explanation=msg % volume.id)
# Ensure volume and snapshot match.
if snapshot_id is None or snapshot_id != l_snap.id:
msg = _("Specified snapshot %(s_id)s is None or not "
"the latest one of volume %(v_id)s.")
raise exc.HTTPBadRequest(explanation=msg % {'s_id': snapshot_id,
'v_id': volume.id})
if volume.size != l_snap.volume_size:
msg = _("Can't revert volume %(v_id)s to its latest snapshot "
"%(s_id)s. The volume size must be equal to the snapshot "
"size.")
raise exc.HTTPBadRequest(explanation=msg % {'s_id': snapshot_id,
'v_id': volume.id})
try:
msg = 'Reverting volume %(v_id)s to snapshot %(s_id)s.'
LOG.info(msg, {'v_id': volume.id,
's_id': l_snap.id})
self.volume_api.revert_to_snapshot(context, volume, l_snap)
except (exception.InvalidVolume, exception.InvalidSnapshot) as e:
raise exc.HTTPConflict(explanation=str(e))
def _image_uuid_from_ref(self, image_ref, context):
# If the image ref was generated by nova api, strip image_ref
# down to an id.
image_uuid = None
try:
image_uuid = image_ref.split('/').pop()
except AttributeError:
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
image_service = glance.get_default_image_service()
# First see if this is an actual image ID
if uuidutils.is_uuid_like(image_uuid):
try:
image = image_service.show(context, image_uuid)
if 'id' in image:
return image['id']
except Exception:
# Pass and see if there is a matching image name
pass
# Could not find by ID, check if it is an image name
try:
params = {'filters': {'name': image_ref}}
images = list(image_service.detail(context, **params))
if len(images) > 1:
msg = _("Multiple matches found for '%s', use an ID to be more"
" specific.") % image_ref
raise exc.HTTPConflict(explanation=msg)
for img in images:
return img['id']
except exc.HTTPConflict:
raise
except Exception:
# Pass the other exception and let default not found error
# handling take care of it
pass
msg = _("Invalid image identifier or unable to "
"access requested image.")
raise exc.HTTPBadRequest(explanation=msg)
def _get_image_snapshot(self, context, image_uuid):
image_snapshot = None
if image_uuid:
image_service = glance.get_default_image_service()
image_meta = image_service.show(context, image_uuid)
if image_meta is not None:
bdms = image_meta.get('properties', {}).get(
'block_device_mapping', [])
if bdms:
boot_bdm = [bdm for bdm in bdms if (
bdm.get('source_type') == 'snapshot' and
bdm.get('boot_index') == 0)]
if boot_bdm:
try:
image_snapshot = self.volume_api.get_snapshot(
context, boot_bdm[0].get('snapshot_id'))
return image_snapshot
except exception.NotFound:
explanation = _(
'Nova specific image is found, but boot '
'volume snapshot id:%s not found.'
) % boot_bdm[0].get('snapshot_id')
raise exc.HTTPNotFound(explanation=explanation)
return image_snapshot
@wsgi.response(HTTPStatus.ACCEPTED)
@validation.schema(schema.create, mv.BASE_VERSION,
mv.get_prior_version(mv.GROUP_VOLUME))
@validation.schema(schema.create_volume_v313, mv.GROUP_VOLUME,
mv.get_prior_version(mv.VOLUME_CREATE_FROM_BACKUP))
@validation.schema(schema.create_volume_v347,
mv.VOLUME_CREATE_FROM_BACKUP,
mv.get_prior_version(mv.SUPPORT_VOLUME_SCHEMA_CHANGES))
@validation.schema(schema.create_volume_v353,
mv.SUPPORT_VOLUME_SCHEMA_CHANGES)
def create(self, req, body):
"""Creates a new volume.
:param req: the request
:param body: the request body
:returns: dict -- the new volume dictionary
:raises HTTPNotFound, HTTPBadRequest:
"""
LOG.debug('Create volume request body: %s', body)
context = req.environ['cinder.context']
req_version = req.api_version_request
# NOTE (pooja_jadhav) To fix bug 1774155, scheduler hints is not
# loaded as a standard extension. If user passes
# OS-SCH-HNT:scheduler_hints in the request body, then it will be
# validated in the create method and this method will add
# scheduler_hints in body['volume'].
body = scheduler_hints.create(req, body)
volume = body['volume']
kwargs = {}
self.clean_name_and_description(volume)
# NOTE: it's 'name'/'description' in the REST API, but
# 'display_name'/display_description' in the database layer,
# so we need to do this translation
if 'name' in volume:
volume['display_name'] = volume.pop('name')
if 'description' in volume:
volume['display_description'] = volume.pop('description')
if 'image_id' in volume:
volume['imageRef'] = volume.pop('image_id')
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
# Not found exception will be handled at the wsgi level
kwargs['volume_type'] = (
objects.VolumeType.get_by_name_or_id(context, req_volume_type))
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
# Not found exception will be handled at the wsgi level
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
else:
kwargs['snapshot'] = None
source_volid = volume.get('source_volid')
if source_volid is not None:
# Not found exception will be handled at the wsgi level
kwargs['source_volume'] = (
self.volume_api.get_volume(context,
source_volid))
else:
kwargs['source_volume'] = None
kwargs['group'] = None
kwargs['consistencygroup'] = None
consistencygroup_id = volume.get('consistencygroup_id')
if consistencygroup_id is not None:
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, consistencygroup_id)
# Get group_id if volume is in a group.
group_id = volume.get('group_id')
if group_id is not None:
# Not found exception will be handled at the wsgi level
kwargs['group'] = self.group_api.get(context, group_id)
image_ref = volume.get('imageRef')
if image_ref is not None:
image_uuid = self._image_uuid_from_ref(image_ref, context)
image_snapshot = self._get_image_snapshot(context, image_uuid)
if (req_version.matches(mv.get_api_version(
mv.SUPPORT_NOVA_IMAGE)) and image_snapshot):
kwargs['snapshot'] = image_snapshot
else:
kwargs['image_id'] = image_uuid
backup_id = volume.get('backup_id')
if backup_id:
kwargs['backup'] = self.backup_api.get(context,
backup_id=backup_id)
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
elif size is None and kwargs.get('backup') is not None:
size = kwargs['backup']['size']
LOG.info("Create volume of %s GB", size)
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
multiattach = utils.get_bool_param('multiattach', volume)
if multiattach:
msg = _("multiattach parameter has been removed. The default "
"behavior is to use multiattach enabled volume types. "
"Contact your administrator to create a multiattach "
"enabled volume type and use it to create multiattach "
"volumes.")
raise exc.HTTPBadRequest(explanation=msg)
try:
new_volume = self.volume_api.create(
context, size, volume.get('display_name'),
volume.get('display_description'), **kwargs)
except exception.VolumeTypeDefaultMisconfiguredError as err:
raise exc.HTTPInternalServerError(explanation=err.msg)
retval = self._view_builder.detail(req, new_volume)
return retval
@validation.schema(schema.update, mv.BASE_VERSION,
mv.get_prior_version(mv.SUPPORT_VOLUME_SCHEMA_CHANGES))
@validation.schema(schema.update_v353,
mv.SUPPORT_VOLUME_SCHEMA_CHANGES)
def update(self, req, id, body):
"""Update a volume."""
context = req.environ['cinder.context']
update_dict = body['volume']
self.clean_name_and_description(update_dict)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in update_dict:
update_dict['display_name'] = update_dict.pop('name')
# NOTE(thingee): v2 API allows description instead of
# display_description
if 'description' in update_dict:
update_dict['display_description'] = update_dict.pop('description')
# Not found and Invalid exceptions will be handled at the wsgi level
try:
volume = self.volume_api.get(context, id, viewable_admin_meta=True)
volume_utils.notify_about_volume_usage(context, volume,
'update.start')
self.volume_api.update(context, volume, update_dict)
except exception.InvalidVolumeMetadataSize as error:
raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg)
volume.update(update_dict)
api_utils.add_visible_admin_metadata(volume)
volume_utils.notify_about_volume_usage(context, volume,
'update.end')
return self._view_builder.detail(req, volume)
def create_resource(ext_mgr):
    """Build the WSGI resource wrapping the v3 volumes controller."""
    controller = VolumeController(ext_mgr)
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/v3/workers.py 0000664 0000000 0000000 00000010677 15131732575 0023016 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_utils import strutils
from oslo_utils import timeutils
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.schemas import workers as schema
from cinder.api.v3.views import workers as workers_view
from cinder.api import validation
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import cleanable
from cinder.policies import workers as policy
from cinder.scheduler import rpcapi as sch_rpc
from cinder import utils
class WorkerController(wsgi.Controller):
    """Controller for the workers API (cleanup of pending resources)."""

    def __init__(self, *args, **kwargs):
        # RPC client used to fan the cleanup request out to the schedulers.
        self.sch_api = sch_rpc.SchedulerAPI()
        # Consistency fix: initialize the wsgi.Controller base class (the
        # volumes controller does the same).
        super().__init__()

    @wsgi.Controller.api_version(mv.WORKERS_CLEANUP)
    @wsgi.response(HTTPStatus.ACCEPTED)
    @validation.schema(schema.cleanup)
    def cleanup(self, req, body=None):
        """Do the cleanup on resources from a specific service/host/node.

        :param req: the WSGI request
        :param body: optional dict restricting the cleanup (location keys
                     such as service_id/cluster_name/host, plus binary,
                     is_up, disabled, resource_id, resource_type)
        :returns: dict with the 'cleaning' and 'unavailable' service lists
        :raises exception.InvalidInput: for an unknown resource_type or a
            resource_id that matches zero or multiple pending workers
        """
        # Let the wsgi middleware convert NotAuthorized exceptions
        ctxt = req.environ['cinder.context']
        ctxt.authorize(policy.CLEAN_POLICY)
        body = body or {}
        # Normalize boolean-like strings ('true', '0', ...) to booleans.
        for boolean in ('disabled', 'is_up'):
            if body.get(boolean) is not None:
                body[boolean] = strutils.bool_from_string(body[boolean])
        resource_type = body.get('resource_type')
        if resource_type:
            resource_type = resource_type.title()
            types = cleanable.CinderCleanableObject.cleanable_resource_types
            if resource_type not in types:
                valid_types = utils.build_or_str(types)
                msg = _('Resource type %(resource_type)s not valid,'
                        ' must be %(valid_types)s')
                msg = msg % {"resource_type": resource_type,
                             "valid_types": valid_types}
                raise exception.InvalidInput(reason=msg)
            body['resource_type'] = resource_type
        resource_id = body.get('resource_id')
        if resource_id:
            # If we have the resource type but we don't have where it is
            # located, we get it from the DB to limit the distribution of the
            # request by the scheduler, otherwise it will be distributed to all
            # the services.
            location_keys = {'service_id', 'cluster_name', 'host'}
            if not location_keys.intersection(body):
                workers = db.worker_get_all(ctxt, resource_id=resource_id,
                                            binary=body.get('binary'),
                                            resource_type=resource_type)
                # BUG FIX: these messages were previously built as a
                # (format_string, resource_id) tuple, so the UUID was never
                # interpolated and the exception reason was a tuple.
                if not workers:
                    msg = _('There is no resource with UUID %s pending '
                            'cleanup.') % resource_id
                    raise exception.InvalidInput(reason=msg)
                if len(workers) > 1:
                    msg = _('There are multiple resources with UUID %s '
                            'pending cleanup. Please be more specific.'
                            ) % resource_id
                    raise exception.InvalidInput(reason=msg)
                worker = workers[0]
                body.update(service_id=worker.service_id,
                            resource_type=worker.resource_type)
        body['until'] = timeutils.utcnow()
        # NOTE(geguileo): If is_up is not specified in the request
        # CleanupRequest's default will be used (False)
        cleanup_request = objects.CleanupRequest(**body)
        cleaning, unavailable = self.sch_api.work_cleanup(ctxt,
                                                          cleanup_request)
        return {
            'cleaning': workers_view.ViewBuilder.service_list(cleaning),
            'unavailable': workers_view.ViewBuilder.service_list(unavailable),
        }
def create_resource():
    """Build the WSGI resource wrapping a fresh WorkerController."""
    controller = WorkerController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/validation/ 0000775 0000000 0000000 00000000000 15131732575 0022537 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/validation/__init__.py 0000664 0000000 0000000 00000004520 15131732575 0024651 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body validating middleware.
"""
import functools
from cinder.api.openstack import api_version_request as api_version
from cinder.api.validation import validators
def schema(request_body_schema, min_version=None, max_version=None):
    """Register a schema to validate request body.
    Registered schema will be used for validating request body just before
    API method executing.
    :param dict request_body_schema: a schema to validate request body
    :param min_version: A string of two numerals. X.Y indicating the minimum
                        version of the JSON-Schema to validate against.
    :param max_version: A string of two numerals. X.Y indicating the maximum
                        version of the JSON-Schema to validate against.
    """
    def _decorator(func):
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            # Bounds are built per call; None yields an unbounded version.
            lower = api_version.APIVersionRequest(min_version)
            upper = api_version.APIVersionRequest(max_version)
            # The request arrives either as the 'req' keyword or as the
            # second positional argument (right after self).
            request = kwargs['req'] if 'req' in kwargs else args[1]
            if request.api_version_request.matches(lower, upper):
                # Only validate against the schema if it lies within
                # the version range specified. Note that if both min
                # and max are not specified the validator will always
                # be run.
                validators._SchemaValidator(
                    request_body_schema).validate(kwargs['body'])
            return func(*args, **kwargs)
        # Expose the schema for introspection (e.g. doc generation).
        _wrapper._request_schema = request_body_schema
        return _wrapper
    return _decorator
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/validation/parameter_types.py 0000664 0000000 0000000 00000020162 15131732575 0026316 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common parameter types for validating request Body.
"""
import copy
import re
import unicodedata
from cinder.common import constants
def _is_printable(char):
"""determine if a unicode code point is printable.
This checks if the character is either "other" (mostly control
codes), or a non-horizontal space. All characters that don't match
those criteria are considered printable; that is: letters;
combining marks; numbers; punctuation; symbols; (horizontal) space
separators.
"""
category = unicodedata.category(char)
return (not category.startswith("C") and
(not category.startswith("Z") or category == "Zs"))
def _get_all_chars():
for i in range(0xFFFF):
yield chr(i)
# build a regex that matches all printable characters. This allows
# spaces in the middle of the name. Also note that the regexp below
# deliberately allows the empty string. This is so only the constraint
# which enforces a minimum length for the name is triggered when an
# empty string is tested. Otherwise it is not deterministic which
# constraint fails and this causes issues for some unittests when
# PYTHONHASHSEED is set randomly.
def _build_regex_range(ws=True, invert=False, exclude=None):
    """Build a range regex for a set of characters in utf8.
    This builds a valid range regex for characters in utf8 by
    iterating the entire space and building up a set of x-y ranges for
    all the characters we find which are valid.
    :param ws: should we include whitespace in this range.
    :param invert: invert the logic
    :param exclude: any characters we want to exclude
    The inversion is useful when we want to generate a set of ranges
    which is everything that's not a certain class. For instance,
    produce all the non printable characters as a set of ranges.
    """
    if exclude is None:
        exclude = []
    regex = ""
    # are we currently in a range
    in_range = False
    # last character we found, for closing ranges
    last = None
    # last character we added to the regex, this lets us know that we
    # already have B in the range, which means we don't need to close
    # it out with B-B. While the later seems to work, it's kind of bad form.
    last_added = None
    def valid_char(char):
        # Membership predicate for the range being built: printable
        # (optionally excluding whitespace), not in 'exclude', and the
        # whole result negated when invert=True.
        if char in exclude:
            result = False
        elif ws:
            result = _is_printable(char)
        else:
            # Zs is the unicode class for space characters, of which
            # there are about 10 in this range.
            result = (_is_printable(char) and
                      unicodedata.category(char) != "Zs")
        if invert is True:
            return not result
        return result
    # Iterate the whole character space, emitting the start of a run when
    # we enter one and a '-<end>' suffix when we leave it.
    for c in _get_all_chars():
        if valid_char(c):
            if not in_range:
                regex += re.escape(c)
                last_added = c
            in_range = True
        else:
            if in_range and last != last_added:
                regex += "-" + re.escape(last)
            in_range = False
        last = c
    else:
        # for/else: runs after the full sweep; closes a run that extends
        # to the very end of the character space.
        if in_range:
            regex += "-" + re.escape(c)
    return regex
# JSON-Schema fragments shared by the API request schemas.  'description'
# accepts any string made of printable characters (see _build_regex_range).
valid_description_regex_base = '^[%s]*$'
valid_description_regex = valid_description_regex_base % (
    _build_regex_range())
name = {
    'type': 'string', 'minLength': 1, 'maxLength': 255,
    'format': 'name'
}
update_name = {
    'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255
}
description = {
    'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,
    'pattern': valid_description_regex,
}
# Accepts real booleans plus the usual truthy/falsy string spellings.
boolean = {
    'type': ['boolean', 'string'],
    'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
             'YES', 'Yes', 'yes', 'y', 't',
             False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
             'NO', 'No', 'no', 'n', 'f'],
}
uuid = {
    'type': 'string', 'format': 'uuid'
}
extra_specs = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:. /]{1,255}$': {
            'type': 'string', 'maxLength': 255
        }
    },
    'additionalProperties': False
}
image_metadata = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:. /]{1,255}$': {
            'type': 'string', 'format': 'mysql_text'
        }
    },
    'additionalProperties': False
}
extra_specs_with_no_spaces_key = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:.]{1,255}$': {
            'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255
        }
    },
    'additionalProperties': False
}
group_snapshot_status = {
    'type': 'string', 'format': 'group_snapshot_status'
}
# Variant of extra_specs whose values may also be null.
extra_specs_with_null = copy.deepcopy(extra_specs)
extra_specs_with_null['patternProperties'][
    '^[a-zA-Z0-9-_:. /]{1,255}$']['type'] = ['string', 'null']
name_allow_zero_min_length = {
    'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255
}
uuid_allow_null = {
    'oneOf': [uuid, {'type': 'null'}]
}
metadata_allows_null = copy.deepcopy(extra_specs)
metadata_allows_null['type'] = ['object', 'null']
container = {
    'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255}
backup_url = {'type': 'string', 'minLength': 1, 'format': 'base64'}
backup_service = {'type': 'string', 'minLength': 0, 'maxLength': 255}
nullable_string = {
    'type': ('string', 'null'), 'minLength': 0, 'maxLength': 255
}
# Size in GiB: a positive integer, possibly sent as a numeric string.
volume_size = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]+$',
    'minimum': 1,
    'maximum': constants.DB_MAX_INT
}
volume_size_allows_null = copy.deepcopy(volume_size)
volume_size_allows_null['type'] += ['null']
hostname = {
    'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255,
    # NOTE: 'host' is defined in "services" table, and that
    # means a hostname. The hostname grammar in RFC952 does
    # not allow for underscores in hostnames. However, this
    # schema allows them, because it sometimes occurs in
    # real systems. As it is a cinder host, not a hostname,
    # and due to some driver needs, colons and forward slashes
    # were also included in the regex.
    'pattern': '^[a-zA-Z0-9-._#@:/+]*$'
}
cinder_host = {
    # A string that represents a cinder host.
    # Examples:
    # hostname
    # hostname.domain
    # hostname.domain@backend
    # hostname.domain@backend#pool
    # hostname.domain@backend#[dead:beef::cafe]:/complex_ipv6_pool_w_share
    'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255,
    'pattern': r'^[a-zA-Z0-9-._#@:/+\[\]]*$'  # hostname plus brackets
}
resource_type = {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 40}
service_id = {
    'type': ['integer', 'string', 'null'],
    'pattern': '^[0-9]*$', 'maxLength': 11
}
optional_uuid = {'oneOf': [{'type': 'null'},
                           {'type': 'string', 'format': 'uuid'}]}
quota_class_set = {
    'type': 'object',
    'format': 'quota_class_set',
    'patternProperties': {
        '^[a-zA-Z0-9-_:. ]{1,300}$': {
            'type': ['integer', 'string'],
            'pattern': '^[0-9]*$', 'minimum': -1, 'minLength': 1,
            'maximum': constants.DB_MAX_INT
        }
    },
    'additionalProperties': False
}
# Accepts any known log binary name plus '' and '*' wildcards.
binary = {
    'type': 'string',
    'enum': [binary for binary in constants.LOG_BINARIES + ('', '*')]
}
key_size = {'type': ['string', 'integer', 'null'],
            'minimum': 0,
            'maximum': constants.DB_MAX_INT,
            'format': 'key_size'}
availability_zone = {
    'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255
}
optional_boolean = {'oneOf': [{'type': 'null'}, boolean]}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/validation/validators.py 0000664 0000000 0000000 00000044635 15131732575 0025275 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Internal implementation of request Body validating middleware.
"""
import re
import jsonschema
from jsonschema import exceptions as jsonschema_exc
from oslo_serialization import base64
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob.exc
from cinder.api import api_utils
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields as c_fields
from cinder import quota
from cinder import utils
# Module-level handles to the quota engines and to the keys that are
# allowed in a quota set without being quotas, used by _validate_quota_set.
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
NON_QUOTA_KEYS = quota.NON_QUOTA_KEYS
def _soft_validate_additional_properties(
        validator, additional_properties_value, param_value, schema):
    """Validator function.
    If there are not any properties on the param_value that are not specified
    in the schema, this will return without any effect. If there are any such
    extra properties, they will be handled as follows:
    - if the validator passed to the method is not of type "object", this
    method will return without any effect.
    - if the 'additional_properties_value' parameter is True, this method will
    return without any effect.
    - if the schema has an additionalProperties value of True, the extra
    properties on the param_value will not be touched.
    - if the schema has an additionalProperties value of False and there
    aren't patternProperties specified, the extra properties will be stripped
    from the param_value.
    - if the schema has an additionalProperties value of False and there
    are patternProperties specified, the extra properties will not be
    touched and raise validation error if pattern doesn't match.
    """
    # NOTE: this is a generator function (it contains 'yield'), so the body
    # only executes when jsonschema iterates the returned generator --
    # including the stripping side effect in the final branch.
    if (not validator.is_type(param_value, "object") or
            additional_properties_value):
        return
    properties = schema.get("properties", {})
    # Alternation of every patternProperties regex; empty string when none.
    patterns = "|".join(schema.get("patternProperties", {}))
    extra_properties = set()
    for prop in param_value:
        if prop not in properties:
            if patterns:
                if not re.search(patterns, prop):
                    extra_properties.add(prop)
            else:
                extra_properties.add(prop)
    if not extra_properties:
        return
    if patterns:
        error = "Additional properties are not allowed (%s %s unexpected)"
        if len(extra_properties) == 1:
            verb = "was"
        else:
            verb = "were"
        yield jsonschema_exc.ValidationError(
            error % (", ".join(repr(extra) for extra in extra_properties),
                     verb))
    else:
        # No patterns: silently strip the unknown keys in place.
        for prop in extra_properties:
            del param_value[prop]
def _validate_string_length(value, entity_name, mandatory=False,
                            min_length=0, max_length=None,
                            remove_whitespaces=False):
    """Check the length of specified string.
    :param value: the value of the string
    :param entity_name: the name of the string
    :param mandatory: string is mandatory or not
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    :param remove_whitespaces: True if trimming whitespaces is needed
                               else False
    """
    # Empty/None values are accepted unless the string is mandatory.
    if not value:
        if mandatory:
            msg = _("The '%s' can not be None.") % entity_name
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return True
    checked = value.strip() if remove_whitespaces else value
    utils.check_string_length(checked, entity_name,
                              min_length=min_length,
                              max_length=max_length)
@jsonschema.FormatChecker.cls_checks('date-time')
def _validate_datetime_format(param_value):
    """Format checker: accept only strings parseable as ISO 8601 times."""
    try:
        timeutils.parse_isotime(param_value)
    except ValueError:
        return False
    return True
@jsonschema.FormatChecker.cls_checks('name', exception.InvalidName)
def _validate_name(param_value):
    """Format checker: reject None, empty and whitespace-only names."""
    if not param_value:
        raise exception.InvalidName(reason=_("The 'name' can not be None."))
    if not param_value.strip():
        raise exception.InvalidName(reason=_("The 'name' can not be empty."))
    return True
@jsonschema.FormatChecker.cls_checks('name_skip_leading_trailing_spaces',
                                     exception.InvalidName)
def _validate_name_skip_leading_trailing_spaces(param_value):
    """Format checker: validate a name after stripping surrounding spaces."""
    if not param_value:
        raise exception.InvalidName(reason=_("The 'name' can not be None."))
    stripped = param_value.strip()
    if not stripped:
        raise exception.InvalidName(reason=_("The 'name' can not be empty."))
    if len(stripped) > 255:
        msg = _("The 'name' can not be greater than 255 characters.")
        raise exception.InvalidName(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('uuid')
def _validate_uuid_format(instance):
    """Format checker: accept only UUID-like strings."""
    return bool(uuidutils.is_uuid_like(instance))
@jsonschema.FormatChecker.cls_checks('group_snapshot_status')
def _validate_status(param_value):
    """Format checker: accept only known group snapshot statuses."""
    if not param_value.strip():
        msg = _("The 'status' can not be empty.")
        raise exception.InvalidGroupSnapshotStatus(reason=msg)
    if param_value.lower() not in c_fields.GroupSnapshotStatus.ALL:
        msg = _("Group snapshot status: %(status)s is invalid, "
                "valid statuses are: "
                "%(valid)s.") % {'status': param_value,
                                 'valid': c_fields.GroupSnapshotStatus.ALL}
        raise exception.InvalidGroupSnapshotStatus(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('progress')
def _validate_progress(progress):
    """Validate a percentage string such as '50%' (0-100, '%' suffix)."""
    if not progress:
        # Missing/empty progress is acceptable.
        return True
    try:
        percent = int(progress[:-1])
    except ValueError:
        msg = _('progress must be an integer percentage')
        raise exception.InvalidInput(reason=msg)
    if not 0 <= percent <= 100 or not progress.endswith('%'):
        msg = _('progress must be an integer percentage between'
                ' 0 and 100')
        raise exception.InvalidInput(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('base64')
def _validate_base64_format(instance):
    """Return True when *instance* decodes as base64, False otherwise."""
    if isinstance(instance, str):
        instance = instance.encode('utf-8')
    try:
        base64.decode_as_bytes(instance)
    except TypeError:
        # Non string/bytes input makes the decoder raise TypeError.
        return False
    return True
@jsonschema.FormatChecker.cls_checks('disabled_reason',
                                     exception.InvalidInput)
def _validate_disabled_reason(param_value):
    """Optional reason string: 1-255 characters after stripping spaces."""
    _validate_string_length(param_value, 'disabled_reason',
                            min_length=1, max_length=255,
                            mandatory=False, remove_whitespaces=True)
    return True
@jsonschema.FormatChecker.cls_checks(
    'name_non_mandatory_remove_white_spaces')
def _validate_name_non_mandatory_remove_white_spaces(param_value):
    """Optional name: at most 255 characters after stripping spaces."""
    _validate_string_length(param_value, 'name',
                            min_length=0, max_length=255,
                            mandatory=False, remove_whitespaces=True)
    return True
@jsonschema.FormatChecker.cls_checks(
    'description_non_mandatory_remove_white_spaces')
def _validate_description_non_mandatory_remove_white_spaces(param_value):
    """Optional description: at most 255 characters after stripping spaces."""
    _validate_string_length(param_value, 'description',
                            min_length=0, max_length=255,
                            mandatory=False, remove_whitespaces=True)
    return True
@jsonschema.FormatChecker.cls_checks('quota_set')
def _validate_quota_set(quota_set):
    """Validate the keys and integer values of a quota-set dict."""
    unknown_keys = []
    for key, value in quota_set.items():
        recognized = (key in QUOTAS or key in GROUP_QUOTAS or
                      key in NON_QUOTA_KEYS)
        if not recognized:
            unknown_keys.append(key)
            continue
        if key in NON_QUOTA_KEYS:
            # Non-quota keys carry no numeric value to validate.
            continue
        # Quota values must be integers in [-1, MAX_INT]; -1 = unlimited.
        api_utils.validate_integer(value, key, min_value=-1,
                                   max_value=db.MAX_INT)
    if unknown_keys:
        msg = _("Bad key(s) in quota set: %s") % ", ".join(unknown_keys)
        raise exception.InvalidInput(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('quota_class_set')
def _validate_quota_class_set(instance):
    """Ensure every key in the quota class set is a known quota name."""
    bad_keys = [key for key in instance
                if key not in QUOTAS and key not in GROUP_QUOTAS]
    if bad_keys:
        msg = _("Bad key(s) in quota class set: %s") % ", ".join(bad_keys)
        raise exception.InvalidInput(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks(
    'group_status', webob.exc.HTTPBadRequest)
def _validate_group_status(param_value):
    """Validate a group status value.

    :param param_value: candidate status string
    :raises webob.exc.HTTPBadRequest: if the value is None
    :raises exception.InvalidGroupStatus: if the value is empty or is not
        one of the valid group statuses
    """
    if param_value is None:
        msg = _("The 'status' can not be None.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    if len(param_value.strip()) == 0:
        msg = _("The 'status' can not be empty.")
        raise exception.InvalidGroupStatus(reason=msg)
    # BUG FIX: membership must be checked against GroupStatus.ALL -- the
    # same set reported in the error message below -- not against
    # GroupSnapshotStatus.ALL, which is the status set for snapshots.
    if param_value.lower() not in c_fields.GroupStatus.ALL:
        msg = _("Group status: %(status)s is invalid, valid status "
                "are: %(valid)s.") % {'status': param_value,
                                      'valid': c_fields.GroupStatus.ALL}
        raise exception.InvalidGroupStatus(reason=msg)
    return True
@jsonschema.FormatChecker.cls_checks('availability_zone')
def _validate_availability_zone(param_value):
    """An AZ may be omitted (None); otherwise 1-255 chars after stripping."""
    if param_value is not None:
        _validate_string_length(param_value, "availability_zone",
                                min_length=1, max_length=255,
                                mandatory=True, remove_whitespaces=True)
    return True
@jsonschema.FormatChecker.cls_checks(
    'group_type', (webob.exc.HTTPBadRequest, exception.InvalidInput))
def _validate_group_type(param_value):
    """A group type is mandatory: 1-255 characters after stripping spaces."""
    _validate_string_length(param_value, 'group_type',
                            min_length=1, max_length=255,
                            mandatory=True, remove_whitespaces=True)
    return True
@jsonschema.FormatChecker.cls_checks('level')
def _validate_log_level(level):
    """Valid when utils.get_log_method accepts the level name."""
    utils.get_log_method(level)
    return True
@jsonschema.FormatChecker.cls_checks('validate_volume_reset_body')
def _validate_volume_reset_body(instance):
    """Require at least one resettable field in the request body."""
    resettable = ('status', 'attach_status', 'migration_status')
    if not any(instance.get(field) for field in resettable):
        msg = _("Must specify 'status', 'attach_status' or 'migration_status'"
                " for update.")
        raise exception.InvalidParameterValue(err=msg)
    return True
@jsonschema.FormatChecker.cls_checks('volume_status')
def _validate_volume_status(param_value):
    """Accept a missing value or any valid volume status (case-insensitive)."""
    if not param_value:
        return True
    if param_value.lower() not in c_fields.VolumeStatus.ALL:
        msg = _("Volume status: %(status)s is invalid, "
                "valid statuses are: "
                "%(valid)s.") % {'status': param_value,
                                 'valid': c_fields.VolumeStatus.ALL}
        raise exception.InvalidParameterValue(err=msg)
    return True
@jsonschema.FormatChecker.cls_checks('volume_attach_status')
def _validate_volume_attach_status(param_value):
    """Accept a missing value, 'attached' or 'detached' (case-insensitive)."""
    valid_attach_status = [c_fields.VolumeAttachStatus.ATTACHED,
                           c_fields.VolumeAttachStatus.DETACHED]
    if not param_value or param_value.lower() in valid_attach_status:
        return True
    msg = _("Volume attach status: %(status)s is invalid, "
            "valid statuses are: "
            "%(valid)s.") % {'status': param_value,
                             'valid': valid_attach_status}
    raise exception.InvalidParameterValue(err=msg)
@jsonschema.FormatChecker.cls_checks('volume_migration_status')
def _validate_volume_migration_status(param_value):
    """Accept a missing value or any valid migration status."""
    if not param_value:
        return True
    if param_value.lower() not in c_fields.VolumeMigrationStatus.ALL:
        msg = _("Volume migration status: %(status)s is invalid, "
                "valid statuses are: "
                "%(valid)s.") % {'status': param_value,
                                 'valid': c_fields.VolumeMigrationStatus.ALL}
        raise exception.InvalidParameterValue(err=msg)
    return True
@jsonschema.FormatChecker.cls_checks('snapshot_status')
def _validate_snapshot_status(param_value):
    """Require a non-empty, valid snapshot status (case-insensitive)."""
    if param_value and param_value.lower() in c_fields.SnapshotStatus.ALL:
        return True
    msg = _("Snapshot status: %(status)s is invalid, "
            "valid statuses are: "
            "%(valid)s.") % {'status': param_value,
                             'valid': c_fields.SnapshotStatus.ALL}
    raise exception.InvalidParameterValue(err=msg)
@jsonschema.FormatChecker.cls_checks('backup_status')
def _validate_backup_status(param_value):
    """Only 'available' and 'error' are settable via a backup reset."""
    valid_status = [c_fields.BackupStatus.AVAILABLE,
                    c_fields.BackupStatus.ERROR]
    if param_value and param_value.lower() in valid_status:
        return True
    msg = _("Backup status: %(status)s is invalid, "
            "valid statuses are: "
            "%(valid)s.") % {'status': param_value,
                             'valid': valid_status}
    raise exception.InvalidParameterValue(err=msg)
@jsonschema.FormatChecker.cls_checks('key_size')
def _validate_key_size(param_value):
    """key_size may be None; otherwise it must look like an integer."""
    if param_value is not None and not strutils.is_int_like(param_value):
        raise exception.InvalidInput(reason=(
            _('key_size must be an integer.')))
    return True
@jsonschema.FormatChecker.cls_checks('mysql_text')
def _validate_mysql_text(param_value):
    """Reject values whose UTF-8 encoding exceeds a MySQL TEXT column."""
    # MySQL TEXT holds at most 65535 bytes (not characters).
    return len(param_value.encode('utf8')) <= 65535
class FormatChecker(jsonschema.FormatChecker):
    """A FormatChecker can output the message from cause exception

    We need understandable validation errors messages for users. When a
    custom checker has an exception, the FormatChecker will output a
    readable message provided by the checker.
    """

    def check(self, param_value, format):
        """Check whether the param_value conforms to the given format.

        :argument param_value: the param_value to check
        :type: any primitive type (str, number, bool)
        :argument str format: the format that param_value should conform to
        :raises: :exc:`FormatError` if param_value does not conform to format
        """
        if format not in self.checkers:
            return

        # Custom checkers are registered together with the exception types
        # they are allowed to raise; only those are captured as the cause.
        # Anything else propagates to the default handling.
        func, raises = self.checkers[format]
        result, cause = None, None
        try:
            result = func(param_value)
        except raises as exc:
            cause = exc
        if not result:
            msg = "%r is not a %r" % (param_value, format)
            raise jsonschema_exc.FormatError(msg, cause=cause)
class _SchemaValidator(object):
    """A validator class

    This class is changed from Draft4Validator to validate minimum/maximum
    value of a string number(e.g. '10'). This changes can be removed when
    we tighten up the API definition and the XML conversion.
    Also FormatCheckers are added for checking data formats which would be
    passed through cinder api commonly.
    """
    validator = None
    validator_org = jsonschema.Draft4Validator
    def __init__(self, schema, relax_additional_properties=False):
        """Build a Draft4-based validator for *schema*.

        :param schema: JSON schema dict to validate requests against
        :param relax_additional_properties: when True, unexpected
            properties are handled by the soft additionalProperties
            validator instead of being rejected outright
        """
        # Override minimum/maximum so numeric strings (e.g. '10') are
        # converted and compared numerically (see _number_from_str).
        validators = {
            'minimum': self._validate_minimum,
            'maximum': self._validate_maximum,
        }
        if relax_additional_properties:
            # _soft_validate_additional_properties is defined elsewhere in
            # this module.
            validators[
                'additionalProperties'] = _soft_validate_additional_properties
        validator_cls = jsonschema.validators.extend(self.validator_org,
                                                     validators)
        format_checker = FormatChecker()
        self.validator = validator_cls(schema, format_checker=format_checker)
    def validate(self, *args, **kwargs):
        """Validate input, translating failures to exception.ValidationError.

        :raises exception.ValidationError: with a user-readable detail
            message describing the first validation failure
        """
        try:
            self.validator.validate(*args, **kwargs)
        except jsonschema.ValidationError as ex:
            if isinstance(ex.cause, exception.InvalidName):
                # Custom name checkers raise InvalidName with a message
                # already suitable for end users; surface it unchanged.
                detail = ex.cause.msg
            elif len(ex.path) > 0:
                # Include the failing field name for attribute-level errors.
                detail = _("Invalid input for field/attribute %(path)s."
                           " Value: %(value)s. %(message)s") % {
                    'path': ex.path.pop(), 'value': ex.instance,
                    'message': ex.message
                }
            else:
                detail = ex.message
            raise exception.ValidationError(detail=detail)
        except TypeError as ex:
            # NOTE: If passing non string value to patternProperties parameter,
            # TypeError happens. Here is for catching the TypeError.
            detail = str(ex)
            raise exception.ValidationError(detail=detail)
    def _number_from_str(self, param_value):
        """Return param_value as int or float, or None if not numeric."""
        try:
            value = int(param_value)
        except (ValueError, TypeError):
            try:
                value = float(param_value)
            except (ValueError, TypeError):
                return None
        return value
    def _validate_minimum(self, validator, minimum, param_value, schema):
        # Coerce string numbers before delegating to the stock validator;
        # non-numeric values are skipped (other keywords handle type).
        param_value = self._number_from_str(param_value)
        if param_value is None:
            return
        return self.validator_org.VALIDATORS['minimum'](validator, minimum,
                                                        param_value, schema)
    def _validate_maximum(self, validator, maximum, param_value, schema):
        # Same string-number coercion as _validate_minimum.
        param_value = self._number_from_str(param_value)
        if param_value is None:
            return
        return self.validator_org.VALIDATORS['maximum'](validator, maximum,
                                                        param_value, schema)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/versions.py 0000664 0000000 0000000 00000005700 15131732575 0022631 0 ustar 00root root 0000000 0000000 # Copyright 2010 OpenStack Foundation
# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from http import HTTPStatus
from oslo_config import cfg
from oslo_service import wsgi as base_wsgi
from cinder.api.openstack import api_version_request
from cinder.api.openstack import wsgi
from cinder.api.views import versions as views_versions
CONF = cfg.CONF
# 'describedby' links attached to every version entry returned by this API.
_LINKS = [{
    "rel": "describedby",
    "type": "text/html",
    "href": "https://docs.openstack.org/",
}]
# Static catalog of API versions advertised by this service; min/max
# microversions are taken from cinder.api.openstack.api_version_request.
_KNOWN_VERSIONS = {
    "v3.0": {
        "id": "v3.0",
        "status": "CURRENT",
        "version": api_version_request._MAX_API_VERSION,
        "min_version": api_version_request._MIN_API_VERSION,
        "updated": api_version_request.UPDATED,
        "links": _LINKS,
        "media-types": [{
            "base": "application/json",
            "type": "application/vnd.openstack.volume+json;version=3",
        }]
    },
}
class Versions(base_wsgi.Router):
    """Route versions requests."""

    def __init__(self):
        """Map '/' to the version-list controller's 'all' action."""
        mapper = wsgi.APIMapper()
        mapper.connect('versions', '/',
                       controller=create_resource(), action='all')
        mapper.redirect('', '/')
        super().__init__(mapper)

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory.

        :class:`oslo_service.wsgi.Router` doesn't have this.
        """
        return cls()
class VersionsController(wsgi.Controller):
    """Expose the list of supported API versions."""

    def __init__(self):
        super(VersionsController, self).__init__(None)

    @wsgi.Controller.api_version('3.0')
    def index(self, req):  # pylint: disable=E0102
        """Return versions supported after the start of microversions."""
        builder = views_versions.get_view_builder(req)
        return builder.build_versions(copy.deepcopy(_KNOWN_VERSIONS))

    # NOTE (cknight): Calling the versions API without /v3 in the URL
    # will lead to this unversioned method, which should always return
    # info about all available versions.
    @wsgi.response(HTTPStatus.MULTIPLE_CHOICES)
    def all(self, req):
        """Return all known and enabled versions."""
        builder = views_versions.get_view_builder(req)
        return builder.build_versions(copy.deepcopy(_KNOWN_VERSIONS))
def create_resource():
    """Build the WSGI resource wrapping the versions controller."""
    controller = VersionsController()
    return wsgi.Resource(controller)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/ 0000775 0000000 0000000 00000000000 15131732575 0021542 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0023641 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/availability_zones.py 0000664 0000000 0000000 00000002073 15131732575 0026006 0 ustar 00root root 0000000 0000000 # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cinder.api.common
class ViewBuilder(cinder.api.common.ViewBuilder):
    """Map cinder.volumes.api list_availability_zones response into dicts."""

    def list(self, request, availability_zones):
        """Render availability zones as the API response dictionary."""
        zone_info = [{'zoneName': az['name'],
                      'zoneState': {'available': az['available']}}
                     for az in availability_zones]
        return {'availabilityZoneInfo': zone_info}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/backups.py 0000664 0000000 0000000 00000010215 15131732575 0023543 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model backup API responses as a python dictionary."""

    _collection_name = "backups"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, backups, backup_count=None):
        """Show a list of backups without many details."""
        return self._list_view(self.summary, request, backups, backup_count)

    def detail_list(self, request, backups, backup_count=None):
        """Detailed view of a list of backups ."""
        return self._list_view(self.detail, request, backups, backup_count,
                               self._collection_name + '/detail')

    def summary(self, request, backup):
        """Generic, non-detailed view of a backup."""
        summary_view = {
            'id': backup['id'],
            'name': backup['display_name'],
            'links': self._get_links(request, backup['id']),
        }
        return {'backup': summary_view}

    def restore_summary(self, request, restore):
        """Generic, non-detailed view of a restore."""
        restore_view = {
            'backup_id': restore['backup_id'],
            'volume_id': restore['volume_id'],
            'volume_name': restore['volume_name'],
        }
        return {'restore': restore_view}

    def detail(self, request, backup):
        """Detailed view of a single backup."""
        detail_view = {
            'id': backup.get('id'),
            'status': backup.get('status'),
            'size': backup.get('size'),
            'object_count': backup.get('object_count'),
            'availability_zone': backup.get('availability_zone'),
            'container': backup.get('container'),
            'created_at': backup.get('created_at'),
            'updated_at': backup.get('updated_at'),
            'name': backup.get('display_name'),
            'description': backup.get('display_description'),
            'fail_reason': backup.get('fail_reason'),
            'volume_id': backup.get('volume_id'),
            'links': self._get_links(request, backup['id']),
            'is_incremental': backup.is_incremental,
            'has_dependent_backups': backup.has_dependent_backups,
            'snapshot_id': backup.snapshot_id,
            'data_timestamp': backup.data_timestamp,
        }
        return {'backup': detail_view}

    def _list_view(self, func, request, backups, backup_count,
                   coll_name=_collection_name):
        """Provide a view for a list of backups."""
        entries = [func(request, backup)['backup'] for backup in backups]
        links = self._get_collection_links(request,
                                           backups,
                                           coll_name,
                                           backup_count)
        result = dict(backups=entries)
        if links:
            result['backups_links'] = links
        if backup_count is not None:
            result['count'] = backup_count
        return result

    def export_summary(self, request, export):
        """Generic view of an export."""
        record_view = {
            'backup_service': export['backup_service'],
            'backup_url': export['backup_url'],
        }
        return {'backup-record': record_view}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/capabilities.py 0000664 0000000 0000000 00000004104 15131732575 0024544 0 ustar 00root root 0000000 0000000 # Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model capabilities API responses as a python dictionary."""

    _collection_name = "capabilities"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary(self, request, capabilities, id):
        """Summary view of a backend capabilities."""
        # storage_protocol may internally be a list of equivalent variants
        # (e.g. FC, fibre_channel); only the preferred (first) variant is
        # exposed to users.
        protocol = capabilities.get('storage_protocol')
        if isinstance(protocol, list):
            protocol = protocol[0]
        get = capabilities.get
        return {
            'namespace': 'OS::Storage::Capabilities::%s' % id,
            'vendor_name': get('vendor_name'),
            'volume_backend_name': get('volume_backend_name'),
            'pool_name': get('pool_name'),
            'driver_version': get('driver_version'),
            'storage_protocol': protocol,
            'display_name': get('display_name'),
            'description': get('description'),
            'visibility': get('visibility'),
            'replication_targets': get('replication_targets', []),
            'properties': get('properties'),
        }
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/cgsnapshots.py 0000664 0000000 0000000 00000005017 15131732575 0024453 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model cgsnapshot API responses as a python dictionary."""

    _collection_name = "cgsnapshots"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, cgsnapshots):
        """Show a list of cgsnapshots without many details."""
        return self._list_view(self.summary, request, cgsnapshots)

    def detail_list(self, request, cgsnapshots):
        """Detailed view of a list of cgsnapshots ."""
        return self._list_view(self.detail, request, cgsnapshots)

    def summary(self, request, cgsnapshot):
        """Generic, non-detailed view of a cgsnapshot."""
        return {
            'cgsnapshot': {
                'id': cgsnapshot.id,
                'name': cgsnapshot.name
            }
        }

    def detail(self, request, cgsnapshot):
        """Detailed view of a single cgsnapshot.

        Falls back from ``consistencygroup_id`` to ``group_id`` and finally
        to None when the object exposes neither attribute.
        """
        # BUG FIX: the previous code had an ``else: group_id = None``
        # clause on the outer try, which ran exactly when
        # consistencygroup_id was read successfully and overwrote the
        # value, so the field was always reported as None.
        try:
            group_id = cgsnapshot.consistencygroup_id
        except AttributeError:
            try:
                group_id = cgsnapshot.group_id
            except AttributeError:
                group_id = None
        return {
            'cgsnapshot': {
                'id': cgsnapshot.id,
                'consistencygroup_id': group_id,
                'status': cgsnapshot.status,
                'created_at': cgsnapshot.created_at,
                'name': cgsnapshot.name,
                'description': cgsnapshot.description
            }
        }

    def _list_view(self, func, request, cgsnapshots):
        """Provide a view for a list of cgsnapshots."""
        cgsnapshots_list = [func(request, cgsnapshot)['cgsnapshot']
                            for cgsnapshot in cgsnapshots]
        cgsnapshots_dict = dict(cgsnapshots=cgsnapshots_list)
        return cgsnapshots_dict
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/consistencygroups.py 0000664 0000000 0000000 00000006367 15131732575 0025731 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model consistencygroup API responses as a python dictionary."""

    _collection_name = "consistencygroups"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, consistencygroups):
        """Show a list of consistency groups without many details."""
        return self._list_view(self.summary, request, consistencygroups)

    def detail_list(self, request, consistencygroups):
        """Detailed view of a list of consistency groups ."""
        return self._list_view(self.detail, request, consistencygroups)

    def summary(self, request, consistencygroup):
        """Generic, non-detailed view of a consistency group."""
        return {
            'consistencygroup': {
                'id': consistencygroup.id,
                'name': consistencygroup.name
            }
        }

    def detail(self, request, consistencygroup):
        """Detailed view of a single consistency group."""
        # volume_type_id may be a comma-separated string, or the object may
        # instead expose a volume_types relation; fall back to an empty list.
        try:
            raw = consistencygroup.volume_type_id
            volume_types = [t for t in (raw.split(",") if raw else []) if t]
        except AttributeError:
            try:
                volume_types = [v_type.id for v_type in
                                consistencygroup.volume_types]
            except AttributeError:
                volume_types = []
        return {
            'consistencygroup': {
                'id': consistencygroup.id,
                'status': consistencygroup.status,
                'availability_zone': consistencygroup.availability_zone,
                'created_at': consistencygroup.created_at,
                'name': consistencygroup.name,
                'description': consistencygroup.description,
                'volume_types': volume_types,
            }
        }

    def _list_view(self, func, request, consistencygroups):
        """Provide a view for a list of consistency groups."""
        entries = [func(request, cg)['consistencygroup']
                   for cg in consistencygroups]
        cg_links = self._get_collection_links(request,
                                              consistencygroups,
                                              self._collection_name)
        result = dict(consistencygroups=entries)
        if cg_links:
            result['consistencygroup_links'] = cg_links
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/limits.py 0000664 0000000 0000000 00000006122 15131732575 0023416 0 ustar 00root root 0000000 0000000 # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
class ViewBuilder(object):
    """OpenStack API base limits view builder."""

    def build(self, rate_limits, absolute_limits):
        """Assemble the complete limits response body.

        :param rate_limits: iterable of rate limit dicts (URI, regex, verb,
            value, remaining, unit, resetTime)
        :param absolute_limits: dict mapping quota names to values
        :returns: {"limits": {"rate": [...], "absolute": {...}}}
        """
        rate_limits = self._build_rate_limits(rate_limits)
        absolute_limits = self._build_absolute_limits(absolute_limits)

        output = {
            "limits": {
                "rate": rate_limits,
                "absolute": absolute_limits,
            },
        }

        return output

    def _build_absolute_limits(self, absolute_limits):
        """Builder for absolute limits

        absolute_limits should be given as a dict of limits.
        For example: {"ram": 512, "gigabytes": 1024}.
        Unknown names and None values are skipped.
        """
        limit_names = {
            "gigabytes": ["maxTotalVolumeGigabytes"],
            "backup_gigabytes": ["maxTotalBackupGigabytes"],
            "volumes": ["maxTotalVolumes"],
            "snapshots": ["maxTotalSnapshots"],
            "backups": ["maxTotalBackups"],
        }
        limits = {}
        for name, value in absolute_limits.items():
            if name in limit_names and value is not None:
                # Use a distinct inner-loop variable; the previous code
                # rebound ``name`` here, shadowing the outer loop variable.
                for display_name in limit_names[name]:
                    limits[display_name] = value
        return limits

    def _build_rate_limits(self, rate_limits):
        """Group individual rate limits by their (URI, regex) pair."""
        limits = []
        for rate_limit in rate_limits:
            _rate_limit_key = None
            _rate_limit = self._build_rate_limit(rate_limit)

            # check for existing key
            for limit in limits:
                if (limit["uri"] == rate_limit["URI"] and
                        limit["regex"] == rate_limit["regex"]):
                    _rate_limit_key = limit
                    break

            # ensure we have a key if we didn't find one
            if not _rate_limit_key:
                _rate_limit_key = {
                    "uri": rate_limit["URI"],
                    "regex": rate_limit["regex"],
                    "limit": [],
                }
                limits.append(_rate_limit_key)

            _rate_limit_key["limit"].append(_rate_limit)

        return limits

    def _build_rate_limit(self, rate_limit):
        """Render a single rate limit, converting resetTime to ISO format."""
        # resetTime is a UTC epoch timestamp; render it as a naive ISO 8601
        # string (no timezone suffix) for API compatibility.
        next_avail = datetime.datetime.fromtimestamp(
            rate_limit["resetTime"], tz=datetime.timezone.utc)
        return {
            "verb": rate_limit["verb"],
            "value": rate_limit["value"],
            "remaining": int(rate_limit["remaining"]),
            "unit": rate_limit["unit"],
            "next-available": next_avail.replace(tzinfo=None).isoformat(),
        }
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/manageable_snapshots.py 0000664 0000000 0000000 00000004340 15131732575 0026273 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model manageable snapshot responses as a python dictionary."""

    _collection_name = "os-snapshot-manage"

    def summary_list(self, request, snapshots, count):
        """Show a list of manageable snapshots without many details."""
        return self._list_view(self.summary, request, snapshots, count)

    def detail_list(self, request, snapshots, count):
        """Detailed view of a list of manageable snapshots."""
        return self._list_view(self.detail, request, snapshots, count)

    def summary(self, request, snapshot):
        """Generic, non-detailed view of a manageable snapshot description."""
        keys = ('reference', 'size', 'safe_to_manage', 'source_reference')
        return {key: snapshot[key] for key in keys}

    def detail(self, request, snapshot):
        """Detailed view of a manageable snapshot description."""
        keys = ('reference', 'size', 'safe_to_manage', 'reason_not_safe',
                'extra_info', 'cinder_id', 'source_reference')
        return {key: snapshot[key] for key in keys}

    def _list_view(self, func, request, snapshots, count):
        """Provide a view for a list of manageable snapshots."""
        entries = [func(request, snapshot) for snapshot in snapshots]
        return {"manageable-snapshots": entries}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/manageable_volumes.py 0000664 0000000 0000000 00000004066 15131732575 0025750 0 ustar 00root root 0000000 0000000 # Copyright (c) 2016 Stratoscale, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model manageable volume responses as a python dictionary."""

    _collection_name = "os-volume-manage"

    def summary_list(self, request, volumes, count):
        """Show a list of manageable volumes without many details."""
        return self._list_view(self.summary, request, volumes, count)

    def detail_list(self, request, volumes, count):
        """Detailed view of a list of manageable volumes."""
        return self._list_view(self.detail, request, volumes, count)

    def summary(self, request, volume):
        """Generic, non-detailed view of a manageable volume description."""
        keys = ('reference', 'size', 'safe_to_manage')
        return {key: volume[key] for key in keys}

    def detail(self, request, volume):
        """Detailed view of a manageable volume description."""
        keys = ('reference', 'size', 'safe_to_manage', 'reason_not_safe',
                'cinder_id', 'extra_info')
        return {key: volume[key] for key in keys}

    def _list_view(self, func, request, volumes, count):
        """Provide a view for a list of manageable volumes."""
        entries = [func(request, volume) for volume in volumes]
        return {"manageable-volumes": entries}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/qos_specs.py 0000664 0000000 0000000 00000004632 15131732575 0024120 0 ustar 00root root 0000000 0000000 # Copyright (C) 2013 eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model QoS specs API responses as a python dictionary."""

    _collection_name = "qos-specs"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, qos_specs, qos_count=None):
        """Show a list of qos_specs without many details."""
        # NOTE: the detailed view is deliberately reused for list entries.
        return self._list_view(self.detail, request, qos_specs, qos_count)

    def summary(self, request, qos_spec):
        """Generic, non-detailed view of a qos_specs."""
        return self.detail(request, qos_spec)

    def detail(self, request, qos_spec):
        """Detailed view of a single qos_spec."""
        # TODO(zhiteng) Add associations to detailed view
        spec_view = {
            'id': qos_spec.id,
            'name': qos_spec.name,
            'consumer': qos_spec.consumer,
            'specs': qos_spec.specs,
        }
        return {
            'qos_specs': spec_view,
            'links': self._get_links(request, qos_spec.id),
        }

    def associations(self, request, associates):
        """View of qos specs associations."""
        return {'qos_associations': associates}

    def _list_view(self, func, request, qos_specs, qos_count=None):
        """Provide a view for a list of qos_specs."""
        result = {
            'qos_specs': [func(request, spec)['qos_specs']
                          for spec in qos_specs],
        }
        links = self._get_collection_links(request, qos_specs,
                                           self._collection_name,
                                           qos_count)
        if links:
            result['qos_specs_links'] = links
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/scheduler_stats.py 0000664 0000000 0000000 00000004236 15131732575 0025315 0 ustar 00root root 0000000 0000000 # Copyright (C) 2014 eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model scheduler-stats API responses as a python dictionary."""

    _collection_name = "scheduler-stats"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary(self, request, pool):
        """Summary view of a single pool."""
        return {'pool': {'name': pool.get('name')}}

    def detail(self, request, pool):
        """Detailed view of a single pool."""
        # Internally storage_protocol can be a list with all the variants (eg.
        # FC, fibre_channel), but we return a single value to users. The first
        # value is the preferred variant.
        capabilities = pool.get('capabilities')
        if capabilities:
            protocol = capabilities.get('storage_protocol')
            if isinstance(protocol, list):
                # Copy so the scheduler's own stats dict is not mutated.
                capabilities = dict(capabilities,
                                    storage_protocol=protocol[0])
        return {
            'pool': {
                'name': pool.get('name'),
                'capabilities': capabilities,
            }
        }

    def pools(self, request, pools, detail):
        """Detailed/Summary view of a list of pools seen by scheduler."""
        view = self.detail if detail else self.summary
        return {'pools': [view(request, pool)['pool'] for pool in pools]}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/snapshots.py 0000664 0000000 0000000 00000006163 15131732575 0024144 0 ustar 00root root 0000000 0000000 # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Model snapshot API responses as a python dictionary."""

    _collection_name = "snapshots"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, snapshots, snapshot_count=None):
        """Show a list of snapshots without many details."""
        return self._list_view(self.summary, request, snapshots,
                               snapshot_count)

    def detail_list(self, request, snapshots, snapshot_count=None):
        """Detailed view of a list of snapshots."""
        return self._list_view(self.detail, request, snapshots, snapshot_count,
                               coll_name=self._collection_name + '/detail')

    def summary(self, request, snapshot):
        """Generic, non-detailed view of a snapshot."""
        # Non-dict metadata (e.g. not yet loaded) is presented as empty.
        if isinstance(snapshot.metadata, dict):
            meta = snapshot.metadata
        else:
            meta = {}
        return {
            'snapshot': {
                'id': snapshot.id,
                'created_at': snapshot.created_at,
                'updated_at': snapshot.updated_at,
                'name': snapshot.display_name,
                'description': snapshot.display_description,
                'volume_id': snapshot.volume_id,
                'status': snapshot.status,
                'size': snapshot.volume_size,
                'metadata': meta,
            }
        }

    def detail(self, request, snapshot):
        """Detailed view of a single snapshot."""
        # NOTE(geguileo): No additional data at the moment
        return self.summary(request, snapshot)

    def _list_view(self, func, request, snapshots, snapshot_count,
                   coll_name=_collection_name):
        """Provide a view for a list of snapshots."""
        views = [func(request, snap)['snapshot'] for snap in snapshots]
        links = self._get_collection_links(request, snapshots, coll_name,
                                           snapshot_count)
        result = {self._collection_name: views}
        if links:
            result[self._collection_name + '_links'] = links
        if snapshot_count is not None:
            result['count'] = snapshot_count
        return result
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/transfers.py 0000664 0000000 0000000 00000011330 15131732575 0024121 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
from cinder.api import microversions as mv
class ViewBuilder(common.ViewBuilder):
    """Model transfer API responses as a python dictionary."""

    _collection_name = "os-volume-transfer"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, transfers, origin_transfer_count):
        """Show a list of transfers without many details."""
        return self._list_view(self.summary, request, transfers,
                               origin_transfer_count)

    def detail_list(self, request, transfers, origin_transfer_count):
        """Detailed view of a list of transfers ."""
        return self._list_view(self.detail, request, transfers,
                               origin_transfer_count)

    def summary(self, request, transfer):
        """Generic, non-detailed view of a transfer."""
        return {
            'transfer': {
                'id': transfer['id'],
                'volume_id': transfer.get('volume_id'),
                'name': transfer['display_name'],
                'links': self._get_links(request,
                                         transfer['id']),
            },
        }

    def _add_versioned_fields(self, request, transfer, body):
        """Add microversion-gated fields to a transfer view, in place.

        Previously this logic was duplicated verbatim in ``detail`` and
        ``create``; it is factored out so both views stay in sync.

        :param request: the request whose microversion gates the fields
        :param transfer: the transfer record being rendered
        :param body: a ``{'transfer': {...}}`` dict updated in place
        """
        req_version = request.api_version_request
        if req_version.matches(mv.TRANSFER_WITH_SNAPSHOTS):
            body['transfer'].update({'no_snapshots':
                                     transfer.get('no_snapshots')})
        if req_version.matches(mv.TRANSFER_WITH_HISTORY):
            transfer_history = {
                'destination_project_id': transfer['destination_project_id'],
                'source_project_id': transfer['source_project_id'],
                'accepted': transfer['accepted']
            }
            body['transfer'].update(transfer_history)

    def detail(self, request, transfer):
        """Detailed view of a single transfer."""
        detail_body = {
            'transfer': {
                'id': transfer.get('id'),
                'created_at': transfer.get('created_at'),
                'name': transfer.get('display_name'),
                'volume_id': transfer.get('volume_id'),
                'links': self._get_links(request, transfer['id'])
            }
        }
        self._add_versioned_fields(request, transfer, detail_body)
        return detail_body

    def create(self, request, transfer):
        """Detailed view of a single transfer when created."""
        create_body = {
            'transfer': {
                'id': transfer.get('id'),
                'created_at': transfer.get('created_at'),
                'name': transfer.get('display_name'),
                'volume_id': transfer.get('volume_id'),
                # Only the creation response exposes the auth key.
                'auth_key': transfer.get('auth_key'),
                'links': self._get_links(request, transfer['id'])
            }
        }
        self._add_versioned_fields(request, transfer, create_body)
        return create_body

    def _list_view(self, func, request, transfers, origin_transfer_count):
        """Provide a view for a list of transfers."""
        transfers_list = [func(request, transfer)['transfer'] for transfer in
                          transfers]
        transfers_links = self._get_collection_links(request,
                                                     transfers,
                                                     self._collection_name,
                                                     origin_transfer_count)
        transfers_dict = dict(transfers=transfers_list)
        if transfers_links:
            transfers_dict['transfers_links'] = transfers_links
        return transfers_dict
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/types.py 0000664 0000000 0000000 00000002652 15131732575 0023265 0 ustar 00root root 0000000 0000000 # Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
    """Render volume-type API responses as trimmed dictionaries."""

    # Only these attributes are exposed to API consumers.
    _shown_keys = ('id', 'name', 'is_public', 'extra_specs', 'description')

    def show(self, request, volume_type, brief=False):
        """Trim away extraneous volume type attributes."""
        trimmed = {key: volume_type.get(key) for key in self._shown_keys}
        if brief:
            return trimmed
        return dict(volume_type=trimmed)

    def index(self, request, volume_types):
        """Index over trimmed volume types."""
        views = [self.show(request, vtype, True) for vtype in volume_types]
        return dict(volume_types=views)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/api/views/versions.py 0000664 0000000 0000000 00000005460 15131732575 0023771 0 ustar 00root root 0000000 0000000 # Copyright 2010-2011 OpenStack Foundation
# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import urllib
from oslo_config import cfg
# Option controlling the base URL used when building version-discovery
# links; falls back to the request's own URL when unset.
versions_opts = [
    cfg.StrOpt('public_endpoint',
               help="Public url to use for versions endpoint. The default "
                    "is None, which will use the request's host_url "
                    "attribute to populate the URL base. If Cinder is "
                    "operating behind a proxy, you will want to change "
                    "this to represent the proxy's URL."),
]

CONF = cfg.CONF
CONF.register_opts(versions_opts)
def get_view_builder(req):
    """Return a ViewBuilder rooted at the configured or request base URL."""
    endpoint = CONF.public_endpoint or req.application_url
    return ViewBuilder(endpoint)
class ViewBuilder(object):
    """Build version-discovery documents with self links."""

    def __init__(self, base_url):
        """Initialize ViewBuilder.

        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build_versions(self, versions):
        """Return ``{'versions': [...]}`` views sorted by version key."""
        views = [self._build_version(versions[key])
                 for key in sorted(versions)]
        return dict(versions=views)

    def _build_version(self, version):
        """Deep-copy one version entry and attach its links."""
        view = copy.deepcopy(version)
        view['links'] = self._build_links(version)
        return view

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        # BUG FIX: the default for a missing 'links' key was {}, and
        # dict has no .append() -- it must be a list.
        links = copy.deepcopy(version_data.get('links', []))
        # 'v3.0' -> 'v3': only the major version appears in the href.
        version_num = version_data["id"].split('.')[0]
        links.append({'rel': 'self',
                      'href': self._generate_href(version=version_num)})
        return links

    def _generate_href(self, version='v3', path=None):
        """Create a URL that refers to a specific version_number."""
        base_url = self._get_base_url_without_version()
        # Always add '/' to base_url end for urljoin href url
        base_url = base_url.rstrip('/') + '/'
        rel_version = version.lstrip('/')
        href = urllib.parse.urljoin(base_url, rel_version).rstrip('/') + '/'
        if path:
            href += path.lstrip('/')
        return href

    def _get_base_url_without_version(self):
        """Get the base URL with out the /v3 suffix."""
        return re.sub('v[1-9]+/?$', '', self.base_url)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/ 0000775 0000000 0000000 00000000000 15131732575 0021101 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/__init__.py 0000664 0000000 0000000 00000002002 15131732575 0023204 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from cinder.backup import ' elsewhere.
from oslo_utils import importutils
from cinder.common import config
CONF = config.CONF
def API(*args, **kwargs):
    """Instantiate the backup API class named by ``backup_api_class``."""
    return importutils.import_object(CONF.backup_api_class, *args, **kwargs)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/api.py 0000664 0000000 0000000 00000065345 15131732575 0022241 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to the volume backups service."""
from datetime import datetime
import random
from typing import Optional
from zoneinfo import ZoneInfo
from eventlet import greenthread
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import context
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.policies import backup_actions as backup_action_policy
from cinder.policies import backups as policy
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
import cinder.volume
# Option controlling whether backup operations are pinned to the backend's
# own host (True) or may be scheduled on any enabled backup service.
backup_opts = [
    cfg.BoolOpt('backup_use_same_host',
                default=False,
                help='Backup services use same backend.')
]

CONF = cfg.CONF
CONF.register_opts(backup_opts)

LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
# Placeholder volume id recorded on backup rows created via record import.
IMPORT_VOLUME_ID = '00000000-0000-0000-0000-000000000000'
class API(base.Base):
"""API for interacting with the volume backup manager."""
def __init__(self):
    """Create RPC clients for the backup, scheduler and volume services."""
    super().__init__()
    self.backup_rpcapi = backup_rpcapi.BackupAPI()
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
    self.volume_api = cinder.volume.API()
def get(self,
        context: context.RequestContext,
        backup_id: str) -> 'objects.Backup':
    """Fetch a backup by id, enforcing the GET policy against it."""
    result = objects.Backup.get_by_id(context, backup_id)
    context.authorize(policy.GET_POLICY, target_obj=result)
    return result
def _check_support_to_force_delete(self,
                                   context: context.RequestContext,
                                   backup_host: str) -> bool:
    """Ask the backup service on *backup_host* if it supports force delete."""
    return self.backup_rpcapi.check_support_to_force_delete(context,
                                                            backup_host)
def delete(self,
           context: context.RequestContext,
           backup: 'objects.Backup',
           force: bool = False) -> None:
    """Make the RPC call to delete a volume backup.

    Call backup manager to execute backup delete or force delete operation.

    :param context: running context
    :param backup: the dict of backup that is got from DB.
    :param force: indicate force delete or not
    :raises InvalidBackup:
    :raises BackupDriverException:
    :raises ServiceNotFound:
    """
    context.authorize(policy.DELETE_POLICY, target_obj=backup)
    if not force and backup.status not in [fields.BackupStatus.AVAILABLE,
                                           fields.BackupStatus.ERROR]:
        msg = _('Backup status must be available or error')
        raise exception.InvalidBackup(reason=msg)
    if force and not self._check_support_to_force_delete(context,
                                                         backup.host):
        msg = _('force delete')
        raise exception.NotSupportedOperation(operation=msg)
    # Don't allow backup to be deleted if there are incremental
    # backups dependent on it.
    deltas = self.get_all(context, search_opts={'parent_id': backup.id})
    # NOTE: 'if deltas and len(deltas)' was redundant -- the truthiness
    # of the list object already uses its __len__.
    if deltas:
        msg = _('Incremental backups exist for this backup.')
        raise exception.InvalidBackup(reason=msg)
    backup.status = fields.BackupStatus.DELETING
    # Re-home the deletion to a live backup service before dispatching.
    backup.host = self._get_available_backup_service_host(
        backup.host, backup.availability_zone)
    backup.save()
    self.backup_rpcapi.delete_backup(context, backup)
def get_all(self,
            context: context.RequestContext,
            search_opts: Optional[dict] = None,
            marker: Optional[str] = None,
            limit: Optional[int] = None,
            offset: Optional[int] = None,
            sort_keys: Optional[list[str]] = None,
            sort_dirs: Optional[list[str]] = None) -> 'objects.BackupList':
    """List backups; admins may pass all_tenants to see every project."""
    context.authorize(policy.GET_ALL_POLICY)
    if search_opts is None:
        search_opts = {}
    all_tenants = search_opts.pop('all_tenants', '0')
    if not strutils.is_valid_boolstr(all_tenants):
        msg = _("all_tenants must be a boolean, got '%s'.") % all_tenants
        raise exception.InvalidParameterValue(err=msg)

    if context.is_admin and strutils.bool_from_string(all_tenants):
        return objects.BackupList.get_all(context, search_opts,
                                          marker, limit, offset,
                                          sort_keys, sort_dirs)
    return objects.BackupList.get_all_by_project(
        context, context.project_id, search_opts,
        marker, limit, offset, sort_keys, sort_dirs
    )
def _az_matched(self,
                service: 'objects.Service',
                availability_zone: str) -> bool:
    """True when no AZ was requested, or the service lives in that AZ."""
    if not availability_zone:
        return True
    return service.availability_zone == availability_zone
def _is_backup_service_enabled(self,
                               availability_zone: str,
                               host: str) -> bool:
    """Check if there is a backup service available."""
    ctxt = context.get_admin_context()
    services = objects.ServiceList.get_all_by_topic(
        ctxt, constants.BACKUP_TOPIC, disabled=False)
    return any(self._az_matched(srv, availability_zone) and
               srv.host == host and srv.is_up
               for srv in services)
def _get_any_available_backup_service(
        self,
        availability_zone: str) -> Optional[str]:
    """Get an available backup service host.

    Get an available backup service host in the specified
    availability zone.

    :returns: a host name, or None when no matching service is up.
    """
    # list() instead of a pass-through list comprehension; shuffle for
    # naive load balancing across backup services.
    services = list(self._list_backup_services())
    random.shuffle(services)
    # Return the first running service with a matching availability zone
    # (replaces a manual while/index loop with an idiomatic for loop).
    for srv in services:
        if self._az_matched(srv, availability_zone) and srv.is_up:
            return srv.host
    return None
def get_available_backup_service_host(self, host: str, az: str) -> str:
    """Public wrapper returning an appropriate backup service host.

    :raises ServiceNotFound: when no suitable backup service is up.
    """
    return self._get_available_backup_service_host(host, az)
def _get_available_backup_service_host(self, host: str, az: str) -> str:
    """Return an appropriate backup service host."""
    if host and CONF.backup_use_same_host:
        # Pin to the original host, provided its service is still enabled.
        candidate = host if self._is_backup_service_enabled(az, host) else None
    else:
        candidate = self._get_any_available_backup_service(az)
    if not candidate:
        raise exception.ServiceNotFound(service_id='cinder-backup')
    return candidate
def _list_backup_services(self) -> list['objects.Service']:
    """List all enabled backup services.

    :returns: list -- hosts for services that are enabled for backup.
    """
    ctxt = context.get_admin_context()
    return objects.ServiceList.get_all_by_topic(
        ctxt, constants.BACKUP_TOPIC, disabled=False)
def _list_backup_hosts(self) -> list:
    """Hosts of backup services that are both enabled and currently up."""
    return [srv.host
            for srv in self._list_backup_services()
            if not srv.disabled and srv.is_up]
def create(self,
           context: context.RequestContext,
           name: Optional[str],
           description: Optional[str],
           volume_id: str,
           container: str,
           incremental: bool = False,
           availability_zone: Optional[str] = None,
           force: bool = False,
           snapshot_id: Optional[str] = None,
           metadata: Optional[dict] = None) -> 'objects.Backup':
    """Make the RPC call to create a volume backup.

    Validates the volume/snapshot state, reserves quota, selects a parent
    for incremental backups, flips the source status to backing-up and
    hands the new Backup object to the scheduler.

    :param context: running context
    :param name: display name for the new backup
    :param description: display description for the new backup
    :param volume_id: volume to back up
    :param container: backend-specific container for the backup data
    :param incremental: create an incremental backup when True
    :param availability_zone: AZ to create the backup in
    :param force: allow backing up an in-use volume
    :param snapshot_id: back up this snapshot instead of the volume head
    :param metadata: metadata to attach to the backup
    :raises InvalidVolume: volume/snapshot mismatch or bad volume status
    :raises InvalidSnapshot: snapshot not in an available state
    :raises InvalidBackup: incremental requested but no usable parent
    """
    volume = self.volume_api.get(context, volume_id)
    context.authorize(policy.CREATE_POLICY, target_obj=volume)
    snapshot = None
    if snapshot_id:
        snapshot = self.volume_api.get_snapshot(context, snapshot_id)

        # The snapshot must belong to the volume being backed up.
        if volume_id != snapshot.volume_id:
            msg = (_('Volume %(vol1)s does not match with '
                     'snapshot.volume_id %(vol2)s.')
                   % {'vol1': volume_id,
                      'vol2': snapshot.volume_id})
            raise exception.InvalidVolume(reason=msg)
        if snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".')
                   % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)
    elif volume['status'] not in ["available", "in-use"]:
        msg = (_('Volume to be backed up must be available '
                 'or in-use, but the current status is "%s".')
               % volume['status'])
        raise exception.InvalidVolume(reason=msg)
    elif volume['status'] in ["in-use"] and not force:
        msg = _('Backing up an in-use volume must use '
                'the force flag.')
        raise exception.InvalidVolume(reason=msg)

    # Remembered so it can be restored when the backup completes.
    previous_status = volume['status']
    # Reserve a quota before setting volume status and backup status
    try:
        reserve_opts = {'backups': 1,
                        'backup_gigabytes': volume['size']}
        reservations = QUOTAS.reserve(context, **reserve_opts)
    except exception.OverQuota as e:
        quota_utils.process_reserve_over_quota(
            context, e,
            resource='backups',
            size=volume.size)
    # Find the latest backup and use it as the parent backup to do an
    # incremental backup.
    latest_backup = None
    latest_host = None
    if incremental:
        backups = objects.BackupList.get_all_by_volume(
            context, volume_id, volume['project_id'],
            filters={'project_id': context.project_id})
        if backups.objects:
            # NOTE(xyang): The 'data_timestamp' field records the time
            # when the data on the volume was first saved. If it is
            # a backup from volume, 'data_timestamp' will be the same
            # as 'created_at' for a backup. If it is a backup from a
            # snapshot, 'data_timestamp' will be the same as
            # 'created_at' for a snapshot.
            # If not backing up from snapshot, the backup with the latest
            # 'data_timestamp' will be the parent; If backing up from
            # snapshot, the backup with the latest 'data_timestamp' will
            # be chosen only if 'data_timestamp' is earlier than the
            # 'created_at' timestamp of the snapshot; Otherwise, the
            # backup will not be chosen as the parent.
            # For example, a volume has a backup taken at 8:00, then
            # a snapshot taken at 8:10, and then a backup at 8:20.
            # When taking an incremental backup of the snapshot, the
            # parent should be the backup at 8:00, not 8:20, and the
            # 'data_timestamp' of this new backup will be 8:10.
            # Ineligible backups are mapped to the minimum datetime so
            # max() never selects them.
            latest_backup = max(
                backups.objects,
                key=lambda x: x['data_timestamp']
                if (x['status'] == fields.BackupStatus.AVAILABLE and (
                    not snapshot or (snapshot and x['data_timestamp']
                                     < snapshot['created_at'])))
                else datetime(1, 1, 1, 1, 1, 1, tzinfo=ZoneInfo('UTC')))
        else:
            QUOTAS.rollback(context, reservations)
            msg = _('No backups available to do an incremental backup.')
            raise exception.InvalidBackup(reason=msg)

    parent_id = None
    parent = None

    if latest_backup:
        parent = latest_backup
        parent_id = latest_backup.id
        if 'posix' in latest_backup.service:
            # The posix driver needs to schedule incremental backups
            # on the same host as the last backup, otherwise there's
            # nothing to base the incremental backup on.
            latest_host = latest_backup.host
        if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
            QUOTAS.rollback(context, reservations)
            msg = _('No backups available to do an incremental backup.')
            raise exception.InvalidBackup(reason=msg)

    data_timestamp = None
    if snapshot_id:
        snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
        data_timestamp = snapshot.created_at
        self.db.snapshot_update(
            context, snapshot_id,
            {'status': fields.SnapshotStatus.BACKING_UP})
    else:
        self.db.volume_update(context, volume_id,
                              {'status': 'backing-up',
                               'previous_status': previous_status})

    kwargs = {
        'user_id': context.user_id,
        'project_id': context.project_id,
        'display_name': name,
        'display_description': description,
        'volume_id': volume_id,
        'status': fields.BackupStatus.CREATING,
        'container': container,
        'parent_id': parent_id,
        'size': volume['size'],
        'snapshot_id': snapshot_id,
        'data_timestamp': data_timestamp,
        'parent': parent,
        'host': latest_host,
        'metadata': metadata or {},
        'availability_zone': availability_zone
    }
    try:
        backup = objects.Backup(context=context, **kwargs)
    except Exception:
        with excutils.save_and_reraise_exception():
            QUOTAS.rollback(context, reservations)
    try:
        backup.create()
        if not snapshot_id:
            # Backup from volume head: data is as fresh as the record.
            backup.data_timestamp = backup.created_at
            backup.save()
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                # Destroy the record only if it actually reached the DB.
                if 'id' in backup:
                    backup.destroy()
            finally:
                QUOTAS.rollback(context, reservations)

    # Host selection happens in the scheduler, not here.
    self.scheduler_rpcapi.create_backup(context, backup)

    return backup
def restore(self,
            context: context.RequestContext,
            backup_id: str,
            volume_id: Optional[str] = None,
            name: Optional[str] = None) -> dict:
    """Make the RPC call to restore a volume backup.

    When no volume is given a new one is created and polled until it
    leaves the 'creating' state; otherwise the given volume is validated
    and overwritten.

    :param context: running context
    :param backup_id: backup to restore
    :param volume_id: target volume; created automatically when None
    :param name: display name for an auto-created target volume
    :returns: dict with 'backup_id', 'volume_id' and 'volume_name'
    :raises InvalidBackup: backup not available or has no size
    :raises InvalidVolume: target volume unusable or too small
    """
    backup = self.get(context, backup_id)
    context.authorize(policy.RESTORE_POLICY, target_obj=backup)
    if backup['status'] != fields.BackupStatus.AVAILABLE:
        msg = _('Backup status must be available')
        raise exception.InvalidBackup(reason=msg)

    size = backup['size']
    if size is None:
        msg = _('Backup to be restored has invalid size')
        raise exception.InvalidBackup(reason=msg)

    # Create a volume if none specified. If a volume is specified check
    # it is large enough for the backup
    if volume_id is None:
        if name is None:
            name = 'restore_backup_%s' % backup_id

        description = 'auto-created_from_restore_from_backup'

        LOG.info("Creating volume of %(size)s GB for restore of "
                 "backup %(backup_id)s.",
                 {'size': size, 'backup_id': backup_id})
        volume = self.volume_api.create(context, size, name, description)
        volume_is_new = True
        volume_id = volume['id']

        # Poll until the new volume leaves 'creating'; sleep yields the
        # greenthread so other work proceeds while we wait.
        while True:
            volume = self.volume_api.get(context, volume_id)
            if volume['status'] != 'creating':
                break
            greenthread.sleep(1)

        if volume['status'] == "error":
            msg = (_('Error while creating volume %(volume_id)s '
                     'for restoring backup %(backup_id)s.') %
                   {'volume_id': volume_id, 'backup_id': backup_id})
            raise exception.InvalidVolume(reason=msg)
    else:
        volume = self.volume_api.get(context, volume_id)
        volume_is_new = False

    if volume['status'] != "available":
        msg = _('Volume to be restored to must be available')
        raise exception.InvalidVolume(reason=msg)

    LOG.debug('Checking backup size %(bs)s against volume size %(vs)s',
              {'bs': size, 'vs': volume['size']})
    if size > volume['size']:
        msg = (_('volume size %(volume_size)d is too small to restore '
                 'backup of size %(size)d.') %
               {'volume_size': volume['size'], 'size': size})
        raise exception.InvalidVolume(reason=msg)

    LOG.info("Overwriting volume %(volume_id)s with restore of "
             "backup %(backup_id)s",
             {'volume_id': volume_id, 'backup_id': backup_id})

    # Setting the status here rather than setting at start and unrolling
    # for each error condition, it should be a very small window
    backup.host = self._get_available_backup_service_host(
        backup.host, backup.availability_zone)
    backup.status = fields.BackupStatus.RESTORING
    backup.restore_volume_id = volume.id
    backup.save()
    self.db.volume_update(context, volume_id, {'status':
                                               'restoring-backup'})

    self.backup_rpcapi.restore_backup(context, backup.host, backup,
                                      volume_id, volume_is_new)

    d = {'backup_id': backup_id,
         'volume_id': volume_id,
         'volume_name': volume['display_name'], }
    return d
def reset_status(self,
                 context: context.RequestContext,
                 backup_id: str,
                 status: str) -> None:
    """Make the RPC call to reset a volume backup's status.

    Call backup manager to execute backup status reset operation.

    :param context: running context
    :param backup_id: which backup's status to be reset
    :param status: backup's status to be reset
    :raises InvalidBackup:
    """
    # Look up the backup and make sure the caller may reset it.
    backup_obj = self.get(context, backup_id)
    context.authorize(
        backup_action_policy.BASE_POLICY_NAME % "reset_status",
        target_obj=backup_obj)
    # Re-home the operation to a live backup service, then dispatch.
    backup_obj.host = self._get_available_backup_service_host(
        backup_obj.host, backup_obj.availability_zone)
    backup_obj.save()
    self.backup_rpcapi.reset_status(ctxt=context, backup=backup_obj,
                                    status=status)
def export_record(self,
                  context: context.RequestContext,
                  backup_id: str) -> dict:
    """Make the RPC call to export a volume backup.

    Call backup manager to execute backup export.

    :param context: running context
    :param backup_id: backup id to export
    :returns: dictionary -- a description of how to import the backup
    :returns: contains 'backup_url' and 'backup_service'
    :raises InvalidBackup:
    """
    backup = self.get(context, backup_id)
    context.authorize(policy.EXPORT_POLICY, target_obj=backup)
    if backup['status'] != fields.BackupStatus.AVAILABLE:
        msg = (_('Backup status must be available and not %s.') %
               backup['status'])
        raise exception.InvalidBackup(reason=msg)

    LOG.debug("Calling RPCAPI with context: "
              "%(ctx)s, host: %(host)s, backup: %(id)s.",
              {'ctx': context,
               'host': backup['host'],
               'id': backup['id']})

    # Pick a live backup service to perform the export, then dispatch.
    backup.host = self._get_available_backup_service_host(
        backup.host, backup.availability_zone)
    backup.save()
    return self.backup_rpcapi.export_record(context, backup)
    def _get_import_backup(self,
                           context: context.RequestContext,
                           backup_url: str) -> 'objects.Backup':
        """Prepare database backup record for import.

        This method decodes provided backup_url and expects to find the id of
        the backup in there.

        Then checks the DB for the presence of this backup record and if it
        finds it and is not deleted it will raise an exception because the
        record cannot be created or used.

        If the record is in deleted status then we must be trying to recover
        this record, so we'll reuse it.

        If the record doesn't already exist we create it with provided id.

        :param context: running context
        :param backup_url: backup description to be used by the backup driver
        :return: BackupImport object
        :raises InvalidBackup:
        :raises InvalidInput:
        """
        reservations = None
        backup = None
        # Deserialize string backup record into a dictionary
        backup_record = objects.Backup.decode_record(backup_url)
        # ID is a required field since it's what links incremental backups
        if 'id' not in backup_record:
            msg = _('Provided backup record is missing an id')
            raise exception.InvalidInput(reason=msg)
        # Since we use size to reserve&commit quota, size is another required
        # field.
        if 'size' not in backup_record:
            msg = _('Provided backup record is missing size attribute')
            raise exception.InvalidInput(reason=msg)
        # Try to get the backup with that ID in all projects even among
        # deleted entries (we reuse soft-deleted backups).
        try:
            backup = objects.BackupImport.get_by_id(
                context.elevated(read_deleted='yes'),
                backup_record['id'],
                project_only=False)
            # If record exists and it's not deleted we cannot proceed
            # with the import
            if backup.status != fields.BackupStatus.DELETED:
                msg = _('Backup already exists in database.')
                raise exception.InvalidBackup(reason=msg)
        except exception.BackupNotFound:
            # No existing record: a brand new one will be created below.
            pass
        # Check that we're under limit by reserving quota
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': backup_record['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            # Raises the appropriate quota exception for the caller.
            quota_utils.process_reserve_over_quota(
                context, e,
                resource='backups',
                size=backup_record['size'])
        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'volume_id': IMPORT_VOLUME_ID,
            'status': fields.BackupStatus.CREATING,
            'deleted_at': None,
            'deleted': False,
            'metadata': {}
        }
        try:
            if backup:
                # "revive" the soft-deleted backup record retrieved earlier
                backup.update(kwargs)
                backup.save()
            else:
                # create a new backup with the specified ID
                backup = objects.BackupImport(context=context,
                                              id=backup_record['id'], **kwargs)
                backup.create()
            QUOTAS.commit(context, reservations)
        except Exception:
            # On any failure, destroy the (possibly half-created) record and
            # release the quota reservation before re-raising.
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)
        return backup
def import_record(self,
context: context.RequestContext,
backup_service: str,
backup_url: str) -> 'objects.Backup':
"""Make the RPC call to import a volume backup.
:param context: running context
:param backup_service: backup service name
:param backup_url: backup description to be used by the backup driver
:raises InvalidBackup:
:raises ServiceNotFound:
:raises InvalidInput:
"""
context.authorize(policy.IMPORT_POLICY)
# NOTE(ronenkat): since we don't have a backup-scheduler
# we need to find a host that support the backup service
# that was used to create the backup.
# We send it to the first backup service host, and the backup manager
# on that host will forward it to other hosts on the hosts list if it
# cannot support correct service itself.
hosts = self._list_backup_hosts()
if len(hosts) == 0:
raise exception.ServiceNotFound(service_id=backup_service)
# Get Backup object that will be used to import this backup record
backup = self._get_import_backup(context, backup_url)
first_host = hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup,
backup_service,
backup_url,
hosts)
return backup
    def update(self,
               context: context.RequestContext,
               backup_id: str,
               fields: dict) -> 'objects.Backup':
        """Update mutable fields of an existing backup.

        :param context: running context
        :param backup_id: ID of the backup to update
        :param fields: mapping of backup field names to their new values
        :returns: the updated Backup object
        """
        backup = self.get(context, backup_id)
        context.authorize(policy.UPDATE_POLICY, target_obj=backup)
        backup.update(fields)
        backup.save()
        return backup
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/chunkeddriver.py 0000664 0000000 0000000 00000112653 15131732575 0024320 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 Kevin Fox
# Copyright (C) 2015 Tom Barron
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic base class to implement metadata, compression and chunked data
operations
"""
import abc
import hashlib
import json
import os
import sys
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.volume import volume_utils
if sys.platform == 'win32':
from os_win import utilsfactory as os_win_utilsfactory
else:
os_win_utilsfactory = None
LOG = logging.getLogger(__name__)
# Configuration for chunked backup drivers: which algorithm (if any) is used
# to compress each chunk before it is written to the backup repository.
backup_opts = [
    cfg.StrOpt('backup_compression_algorithm',
               default='zlib',
               ignore_case=True,
               choices=[('none', 'Do not use compression'),
                        ('off', "Same as 'none'"),
                        ('no', "Same as 'none'"),
                        ('zlib', 'Use the Deflate compression algorithm'),
                        ('gzip', "Same as 'zlib'"),
                        ('bz2', 'Use Burrows-Wheeler transform compression'),
                        ('bzip2', "Same as 'bz2'"),
                        ('zstd', 'Use the Zstandard compression algorithm')],
               help="Compression algorithm for backups ('none' to disable)"),
]
CONF = cfg.CONF
CONF.register_opts(backup_opts)
def _write_nonzero(volume_file, volume_offset, content):
    """Write non-zero parts of `content` into `volume_file`."""
    piece_size = 1024 * 1024
    view = memoryview(content)
    total = len(view)
    start = 0
    while start < total:
        # The final piece may be shorter than piece_size; that's fine.
        piece = view[start:start + piece_size]
        if not volume_utils.is_all_zero(piece):
            volume_file.seek(volume_offset + start)
            volume_file.write(piece.tobytes())
        start += piece_size
def _write_volume(volume_is_new, volume_file, volume_offset, content):
    """Write `content` at `volume_offset`, sparsely when the volume is new."""
    if not volume_is_new:
        volume_file.seek(volume_offset)
        volume_file.write(content)
        return
    # A new volume starts out zeroed, so all-zero pieces can be skipped.
    _write_nonzero(volume_file, volume_offset, content)
# Object writer and reader returned by inheriting classes must not have any
# logging calls, as well as the compression libraries, as eventlet has a bug
# (https://github.com/eventlet/eventlet/issues/432) that would result in
# failures.
class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta):
"""Abstract chunked backup driver.
Implements common functionality for backup drivers that store volume
data in multiple "chunks" in a backup repository when the size of
the backed up cinder volume exceeds the size of a backup repository
"chunk."
Provides abstract methods to be implemented in concrete chunking
drivers.
"""
DRIVER_VERSION = '1.0.0'
DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}
def _get_compressor(self, algorithm):
try:
if algorithm.lower() in ('none', 'off', 'no'):
return None
if algorithm.lower() in ('zlib', 'gzip'):
import zlib as compressor
result = compressor
elif algorithm.lower() in ('bz2', 'bzip2'):
import bz2 as compressor
result = compressor
elif algorithm.lower() == 'zstd':
import zstd as compressor
result = compressor
else:
result = None
if result:
# NOTE(geguileo): Compression/Decompression starves
# greenthreads so we use a native thread instead.
return eventlet.tpool.Proxy(result)
except ImportError:
pass
err = _('unsupported compression algorithm: %s') % algorithm
raise ValueError(err)
    def __init__(
            self, context, chunk_size_bytes, sha_block_size_bytes,
            backup_default_container, enable_progress_timer,
    ):
        """Initialize a chunked backup driver.

        :param context: running context
        :param chunk_size_bytes: size of each data chunk written to the
            backup repository
        :param sha_block_size_bytes: block size used when computing the
            per-block SHA-256 digests for incremental backups
        :param backup_default_container: container name used when neither
            the backup object nor the driver provides one
        :param enable_progress_timer: whether to periodically send progress
            notifications while backing up
        """
        super(ChunkedBackupDriver, self).__init__(context)
        self.chunk_size_bytes = chunk_size_bytes
        self.sha_block_size_bytes = sha_block_size_bytes
        self.backup_default_container = backup_default_container
        self.enable_progress_timer = enable_progress_timer
        # Notification tuning knobs come straight from configuration.
        self.backup_timer_interval = CONF.backup_timer_interval
        self.data_block_num = CONF.backup_object_number_per_notification
        self.az = CONF.storage_availability_zone
        self.backup_compression_algorithm = CONF.backup_compression_algorithm
        self.compressor = \
            self._get_compressor(CONF.backup_compression_algorithm)
        self.support_force_delete = True
        if sys.platform == 'win32' and self.chunk_size_bytes % 4096:
            # The chunk size must be a multiple of the sector size. In order
            # to fail out early and avoid attaching the disks, we'll just
            # enforce the chunk size to be a multiple of 4096.
            err = _("Invalid chunk size. It must be a multiple of 4096.")
            raise exception.InvalidConfigurationValue(message=err)
def _get_object_writer(self, container, object_name, extra_metadata=None):
"""Return writer proxy-wrapped to execute methods in native thread."""
writer = self.get_object_writer(container, object_name, extra_metadata)
return eventlet.tpool.Proxy(writer)
def _get_object_reader(self, container, object_name, extra_metadata=None):
"""Return reader proxy-wrapped to execute methods in native thread."""
reader = self.get_object_reader(container, object_name, extra_metadata)
return eventlet.tpool.Proxy(reader)
# To create your own "chunked" backup driver, implement the following
# abstract methods.
    @abc.abstractmethod
    def put_container(self, container):
        """Create the container if needed. No failure if it pre-exists.

        :param container: name of the backup container to create
        """
        return
    @abc.abstractmethod
    def get_container_entries(self, container, prefix):
        """Get container entry names.

        :param container: name of the backup container to list
        :param prefix: only entries whose names start with this prefix
        :returns: list of entry names
        """
        return
    @abc.abstractmethod
    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Returns a writer object which stores the chunk data.

        The object returned should be a context handler that can be used in a
        "with" context.

        The object writer methods must not have any logging calls, as eventlet
        has a bug (https://github.com/eventlet/eventlet/issues/432) that would
        result in failures.

        :param container: name of the backup container to write into
        :param object_name: name of the object to create
        :param extra_metadata: driver-specific metadata from
            get_extra_metadata(), if any
        """
        return
    @abc.abstractmethod
    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Returns a reader object for the backed up chunk.

        The object reader methods must not have any logging calls, as eventlet
        has a bug (https://github.com/eventlet/eventlet/issues/432) that would
        result in failures.

        :param container: name of the backup container to read from
        :param object_name: name of the object to read
        :param extra_metadata: driver-specific metadata from
            get_extra_metadata(), if any
        """
        return
    @abc.abstractmethod
    def delete_object(self, container, object_name):
        """Delete object from container.

        :param container: name of the backup container
        :param object_name: name of the object to delete
        """
        return
    @abc.abstractmethod
    def _generate_object_name_prefix(self, backup):
        """Return the object name prefix to use for this backup's chunks."""
        return
    @abc.abstractmethod
    def update_container_name(self, backup, container):
        """Allow sub-classes to override container name.

        This method exists so that sub-classes can override the container name
        as it comes in to the driver in the backup object. Implementations
        should return None if no change to the container name is desired.

        :param backup: the backup object being processed
        :param container: the container name currently on the backup
        :returns: the new container name, or None to keep the current one
        """
        return
    @abc.abstractmethod
    def get_extra_metadata(self, backup, volume):
        """Return extra metadata to use in prepare_backup.

        This method allows for collection of extra metadata in prepare_backup()
        which will be passed to get_object_reader() and get_object_writer().
        Subclass extensions can use this extra information to optimize
        data transfers. Return a json serializable object.

        :param backup: the backup object being processed
        :param volume: the volume being backed up
        """
        return
def _create_container(self, backup):
# Container's name will be decided by the driver (returned by method
# update_container_name), if no change is required by the driver then
# we'll use the one the backup object already has, but if it doesn't
# have one backup_default_container will be used.
new_container = self.update_container_name(backup, backup.container)
if new_container:
# If the driver is not really changing the name we don't want to
# dirty the field in the object and save it to the DB with the same
# value.
if new_container != backup.container:
backup.container = new_container
elif backup.container is None:
backup.container = self.backup_default_container
LOG.debug('_create_container started, container: %(container)s,'
'backup: %(backup_id)s.',
{'container': backup.container, 'backup_id': backup.id})
backup.save()
self.put_container(backup.container)
return backup.container
def _generate_object_names(self, backup):
prefix = backup['service_metadata']
object_names = self.get_container_entries(backup['container'], prefix)
LOG.debug('generated object list: %s.', object_names)
return object_names
def _metadata_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_metadata' % object_name
return filename
def _sha256_filename(self, backup):
object_name = backup['service_metadata']
filename = '%s_sha256file' % object_name
return filename
def _write_metadata(self, backup, volume_id, container, object_list,
volume_meta, extra_metadata=None):
filename = self._metadata_filename(backup)
LOG.debug('_write_metadata started, container name: %(container)s,'
' metadata filename: %(filename)s.',
{'container': container, 'filename': filename})
metadata = {}
metadata['version'] = self.DRIVER_VERSION
metadata['backup_id'] = backup['id']
metadata['volume_id'] = volume_id
metadata['backup_name'] = backup['display_name']
metadata['backup_description'] = backup['display_description']
metadata['created_at'] = str(backup['created_at'])
metadata['objects'] = object_list
metadata['parent_id'] = backup['parent_id']
metadata['volume_meta'] = volume_meta
if extra_metadata:
metadata['extra_metadata'] = extra_metadata
metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
metadata_json = metadata_json.encode('utf-8')
with self._get_object_writer(container, filename) as writer:
writer.write(metadata_json)
LOG.debug('_write_metadata finished. Metadata: %s.', metadata_json)
def _write_sha256file(self, backup, volume_id, container, sha256_list):
filename = self._sha256_filename(backup)
LOG.debug('_write_sha256file started, container name: %(container)s,'
' sha256file filename: %(filename)s.',
{'container': container, 'filename': filename})
sha256file = {}
sha256file['version'] = self.DRIVER_VERSION
sha256file['backup_id'] = backup['id']
sha256file['volume_id'] = volume_id
sha256file['backup_name'] = backup['display_name']
sha256file['backup_description'] = backup['display_description']
sha256file['created_at'] = str(backup['created_at'])
sha256file['chunk_size'] = self.sha_block_size_bytes
sha256file['sha256s'] = sha256_list
sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2)
sha256file_json = sha256file_json.encode('utf-8')
with self._get_object_writer(container, filename) as writer:
writer.write(sha256file_json)
LOG.debug('_write_sha256file finished.')
def _read_metadata(self, backup):
container = backup['container']
filename = self._metadata_filename(backup)
LOG.debug('_read_metadata started, container name: %(container)s, '
'metadata filename: %(filename)s.',
{'container': container, 'filename': filename})
with self._get_object_reader(container, filename) as reader:
metadata_json = reader.read()
metadata_json = metadata_json.decode('utf-8')
metadata = json.loads(metadata_json)
LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json)
return metadata
def _read_sha256file(self, backup):
container = backup['container']
filename = self._sha256_filename(backup)
LOG.debug('_read_sha256file started, container name: %(container)s, '
'sha256 filename: %(filename)s.',
{'container': container, 'filename': filename})
with self._get_object_reader(container, filename) as reader:
sha256file_json = reader.read()
sha256file_json = sha256file_json.decode('utf-8')
sha256file = json.loads(sha256file_json)
LOG.debug('_read_sha256file finished.')
return sha256file
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata.

        Validates the volume size, resolves/creates the container, stores
        the object name prefix on the backup, and builds the bookkeeping
        dictionaries used by _backup_chunk()/_finalize_backup().

        :param backup: the backup object being processed
        :returns: tuple of (object_meta, object_sha256, extra_metadata,
            container, volume_size_bytes)
        :raises InvalidVolume: if the volume size is not positive
        """
        volume = self.db.volume_get(self.context, backup.volume_id)
        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)
        container = self._create_container(backup)
        object_prefix = self._generate_object_name_prefix(backup)
        # The prefix is persisted so object names can be regenerated later.
        backup.service_metadata = object_prefix
        backup.save()
        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug('starting backup of volume: %(volume_id)s,'
                  ' volume size: %(volume_size_bytes)d, object names'
                  ' prefix %(object_prefix)s, availability zone:'
                  ' %(availability_zone)s',
                  {
                      'volume_id': backup.volume_id,
                      'volume_size_bytes': volume_size_bytes,
                      'object_prefix': object_prefix,
                      'availability_zone': availability_zone,
                  })
        # 'id' starts at 1 and is used to name the next chunk object.
        object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
                       'volume_meta': None}
        object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
        extra_metadata = self.get_extra_metadata(backup, volume)
        if extra_metadata is not None:
            object_meta['extra_metadata'] = extra_metadata
        return (object_meta, object_sha256, extra_metadata, container,
                volume_size_bytes)
    def _backup_chunk(self, backup, container, data, data_offset,
                      object_meta, extra_metadata):
        """Backup data chunk based on the object metadata and offset.

        Writes one (optionally compressed) chunk object to the repository
        and records its offset, length, compression algorithm and MD5 in
        object_meta (mutated in place).

        :param backup: the backup object being processed
        :param container: target container name
        :param data: raw bytes of this chunk
        :param data_offset: byte offset of this chunk within the volume
        :param object_meta: running metadata dict ('prefix', 'list', 'id')
        :param extra_metadata: driver-specific metadata for the writer
        """
        object_prefix = object_meta['prefix']
        object_list = object_meta['list']
        object_id = object_meta['id']
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = data_offset
        obj[object_name]['length'] = len(data)
        LOG.debug('Backing up chunk of data from volume.')
        algorithm, output_data = self._prepare_output_data(data)
        obj[object_name]['compression'] = algorithm
        LOG.debug('About to put_object')
        with self._get_object_writer(
                container, object_name, extra_metadata=extra_metadata
        ) as writer:
            writer.write(output_data)
        # MD5 is computed on a native thread so hashing a large chunk does
        # not starve the eventlet greenthreads.
        md5 = eventlet.tpool.execute(
            hashlib.md5, data, usedforsecurity=False).hexdigest()
        obj[object_name]['md5'] = md5
        LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
                  {'object_name': object_name, 'md5': md5})
        object_list.append(obj)
        object_id += 1
        object_meta['list'] = object_list
        object_meta['id'] = object_id
        LOG.debug('Calling eventlet.sleep(0)')
        # Yield so other greenthreads can make progress between chunks.
        eventlet.sleep(0)
def _prepare_output_data(self, data):
if self.compressor is None:
return 'none', data
data_size_bytes = len(data)
# Execute compression in native thread so it doesn't prevent
# cooperative greenthread switching.
compressed_data = self.compressor.compress(data)
comp_size_bytes = len(compressed_data)
algorithm = CONF.backup_compression_algorithm.lower()
if comp_size_bytes >= data_size_bytes:
LOG.debug('Compression of this chunk was ineffective: '
'original length: %(data_size_bytes)d, '
'compressed length: %(compressed_size_bytes)d. '
'Using original data for this chunk.',
{'data_size_bytes': data_size_bytes,
'compressed_size_bytes': comp_size_bytes,
})
return 'none', data
LOG.debug('Compressed %(data_size_bytes)d bytes of data '
'to %(comp_size_bytes)d bytes using %(algorithm)s.',
{'data_size_bytes': data_size_bytes,
'comp_size_bytes': comp_size_bytes,
'algorithm': algorithm,
})
return algorithm, compressed_data
def _finalize_backup(self, backup, container, object_meta, object_sha256):
"""Write the backup's metadata to the backup repository."""
object_list = object_meta['list']
object_id = object_meta['id']
volume_meta = object_meta['volume_meta']
sha256_list = object_sha256['sha256s']
extra_metadata = object_meta.get('extra_metadata')
self._write_sha256file(backup,
backup.volume_id,
container,
sha256_list)
self._write_metadata(backup,
backup.volume_id,
container,
object_list,
volume_meta,
extra_metadata)
# NOTE(whoami-rajat) : The object_id variable is used to name
# the backup objects and hence differs from the object_count
# variable, therefore the increment of object_id value in the last
# iteration of _backup_chunk() method shouldn't be reflected in the
# object_count variable.
backup.object_count = object_id - 1
backup.save()
LOG.debug('backup %s finished.', backup['id'])
def _backup_metadata(self, backup, object_meta):
"""Backup volume metadata.
NOTE(dosaboy): the metadata we are backing up is obtained from a
versioned api so we should not alter it in any way here.
We must also be sure that the service that will perform
the restore is compatible with version used.
"""
json_meta = self.get_metadata(backup['volume_id'])
if not json_meta:
LOG.debug("No volume metadata to backup.")
return
object_meta["volume_meta"] = json_meta
def _send_progress_end(self, context, backup, object_meta):
object_meta['backup_percent'] = 100
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
def _send_progress_notification(self, context, backup, object_meta,
total_block_sent_num, total_volume_size):
backup_percent = total_block_sent_num * 100 / total_volume_size
object_meta['backup_percent'] = backup_percent
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
def _get_win32_phys_disk_size(self, disk_path):
win32_diskutils = os_win_utilsfactory.get_diskutils()
disk_number = win32_diskutils.get_device_number_from_device_name(
disk_path)
return win32_diskutils.get_disk_size(disk_number)
def _calculate_sha(self, data):
"""Calculate SHA256 of a data chunk.
This method cannot log anything as it is called on a native thread.
"""
# NOTE(geguileo): Using memoryview to avoid data copying when slicing
# for the sha256 call.
chunk = memoryview(data)
shalist = []
off = 0
datalen = len(chunk)
while off < datalen:
chunk_end = min(datalen, off + self.sha_block_size_bytes)
block = chunk[off:chunk_end]
sha = hashlib.sha256(block).hexdigest()
shalist.append(sha)
off += self.sha_block_size_bytes
return shalist
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the given volume.

        If backup['parent_id'] is given, then an incremental backup
        is performed: only SHA-256 blocks that differ from the parent
        backup are written.

        :param backup: the backup object to populate
        :param volume_file: open file-like object positioned at the start
            of the volume data
        :param backup_metadata: whether to also back up volume metadata
        :raises InvalidBackup: on chunk/hash-block size mismatch or when an
            incremental backup is not possible
        """
        if self.chunk_size_bytes % self.sha_block_size_bytes:
            err = _('Chunk size is not multiple of '
                    'block size for creating hash.')
            raise exception.InvalidBackup(reason=err)
        # Read the shafile of the parent backup if backup['parent_id']
        # is given.
        parent_backup_shafile = None
        parent_backup = None
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(self.context,
                                                     backup.parent_id)
            parent_backup_shafile = self._read_sha256file(parent_backup)
            parent_backup_shalist = parent_backup_shafile['sha256s']
            if (parent_backup_shafile['chunk_size'] !=
                    self.sha_block_size_bytes):
                err = (_('Hash block size has changed since the last '
                         'backup. New hash block size: %(new)s. Old hash '
                         'block size: %(old)s. Do a full backup.')
                       % {'old': parent_backup_shafile['chunk_size'],
                          'new': self.sha_block_size_bytes})
                raise exception.InvalidBackup(reason=err)
            # If the volume size increased since the last backup, fail
            # the incremental backup and ask user to do a full backup.
            if backup.size > parent_backup.size:
                err = _('Volume size increased since the last '
                        'backup. Do a full backup.')
                raise exception.InvalidBackup(reason=err)
        win32_disk_size = None
        if sys.platform == 'win32':
            # When dealing with Windows physical disks, we need the exact
            # size of the disk. Attempting to read passed this boundary will
            # lead to an IOError exception. At the same time, we cannot
            # seek to the end of file.
            win32_disk_size = self._get_win32_phys_disk_size(volume_file.name)
        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self._prepare_backup(backup)
        counter = 0
        total_block_sent_num = 0
        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when "chunked" backup drivers are deployed.
        def _notify_progress():
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
        timer = loopingcall.FixedIntervalLoopingCall(
            _notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)
        sha256_list = object_sha256['sha256s']
        shaindex = 0
        is_backup_canceled = False
        while True:
            # First of all, we check the status of this backup. If it
            # has been changed to delete or has been deleted, we cancel the
            # backup process to do forcing delete.
            with backup.as_read_deleted():
                backup.refresh()
            if backup.status in (fields.BackupStatus.DELETING,
                                 fields.BackupStatus.DELETED):
                is_backup_canceled = True
                # To avoid the chunk left when deletion complete, need to
                # clean up the object of chunk again.
                self.delete_backup(backup)
                LOG.debug('Cancel the backup process of %s.', backup.id)
                break
            data_offset = volume_file.tell()
            if win32_disk_size is not None:
                # Never read past the end of a Windows physical disk.
                read_bytes = min(self.chunk_size_bytes,
                                 win32_disk_size - data_offset)
            else:
                read_bytes = self.chunk_size_bytes
            data = volume_file.read(read_bytes)
            if data == b'':
                break
            # Calculate new shas with the datablock.
            shalist = eventlet.tpool.execute(self._calculate_sha, data)
            sha256_list.extend(shalist)
            # If parent_backup is not None, that means an incremental
            # backup will be performed.
            if parent_backup:
                # Find the extent that needs to be backed up.
                extent_off = -1
                for idx, sha in enumerate(shalist):
                    if sha != parent_backup_shalist[shaindex]:
                        if extent_off == -1:
                            # Start of new extent.
                            extent_off = idx * self.sha_block_size_bytes
                    else:
                        if extent_off != -1:
                            # We've reached the end of extent.
                            extent_end = idx * self.sha_block_size_bytes
                            segment = data[extent_off:extent_end]
                            self._backup_chunk(backup, container, segment,
                                               data_offset + extent_off,
                                               object_meta,
                                               extra_metadata)
                            extent_off = -1
                    shaindex += 1
                # The last extent extends to the end of data buffer.
                if extent_off != -1:
                    extent_end = len(data)
                    segment = data[extent_off:extent_end]
                    self._backup_chunk(backup, container, segment,
                                       data_offset + extent_off,
                                       object_meta, extra_metadata)
                    extent_off = -1
            else:  # Do a full backup.
                self._backup_chunk(backup, container, data, data_offset,
                                   object_meta, extra_metadata)
            # Notifications
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num. The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # Reset the counter
                counter = 0
        # Stop the timer.
        timer.stop()
        # If backup has been cancelled we have nothing more to do
        # but timer.stop().
        if is_backup_canceled:
            return
        # All the data have been sent, the backup_percent reaches 100.
        self._send_progress_end(self.context, backup, object_meta)
        object_sha256['sha256s'] = sha256_list
        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            # Whatever goes wrong, we want to log, cleanup, and re-raise.
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Backup volume metadata failed.")
                    self.delete_backup(backup)
        self._finalize_backup(backup, container, object_meta, object_sha256)
    def _restore_v1(self, backup, volume_id, metadata, volume_file,
                    volume_is_new, requested_backup):
        """Restore a v1 volume backup.

        Raises BackupRestoreCancel on any requested_backup status change, we
        ignore the backup parameter for this check since that's only the
        current data source from the list of backup sources.

        :param backup: backup currently being restored (one layer of the
            incremental chain)
        :param volume_id: destination volume id
        :param metadata: deserialized metadata object for `backup`
        :param volume_file: open file-like object for the destination volume
        :param volume_is_new: True if the destination volume is freshly
            created (allows sparse writes)
        :param requested_backup: the backup the user asked to restore; its
            status is polled for cancellation
        """
        backup_id = backup['id']
        LOG.debug('v1 volume backup restore of %s started.', backup_id)
        extra_metadata = metadata.get('extra_metadata')
        container = backup['container']
        metadata_objects = metadata['objects']
        metadata_object_names = []
        for obj in metadata_objects:
            metadata_object_names.extend(obj.keys())
        LOG.debug('metadata_object_names = %s.', metadata_object_names)
        # The metadata and sha256 index objects are not data chunks.
        prune_list = [self._metadata_filename(backup),
                      self._sha256_filename(backup)]
        object_names = [object_name for object_name in
                        self._generate_object_names(backup)
                        if object_name not in prune_list]
        # Sanity check: the repository's objects must match the manifest.
        if sorted(object_names) != sorted(metadata_object_names):
            err = _('restore_backup aborted, actual object list '
                    'does not match object list stored in metadata.')
            raise exception.InvalidBackup(reason=err)
        for metadata_object in metadata_objects:
            # Abort when status changes to error, available, or anything else
            with requested_backup.as_read_deleted():
                requested_backup.refresh()
            if requested_backup.status != fields.BackupStatus.RESTORING:
                raise exception.BackupRestoreCancel(back_id=backup.id,
                                                    vol_id=volume_id)
            object_name, obj = list(metadata_object.items())[0]
            LOG.debug('restoring object. backup: %(backup_id)s, '
                      'container: %(container)s, object name: '
                      '%(object_name)s, volume: %(volume_id)s.',
                      {
                          'backup_id': backup_id,
                          'container': container,
                          'object_name': object_name,
                          'volume_id': volume_id,
                      })
            with self._get_object_reader(
                    container, object_name,
                    extra_metadata=extra_metadata) as reader:
                body = reader.read()
            compression_algorithm = metadata_object[object_name]['compression']
            decompressor = self._get_compressor(compression_algorithm)
            if decompressor is not None:
                LOG.debug('decompressing data using %s algorithm',
                          compression_algorithm)
                decompressed = decompressor.decompress(body)
                body = None  # Allow Python to free it
                _write_volume(volume_is_new,
                              volume_file, obj['offset'], decompressed)
                decompressed = None  # Allow Python to free it
            else:
                _write_volume(volume_is_new,
                              volume_file, obj['offset'], body)
                body = None  # Allow Python to free it
            # force flush every write to avoid long blocking write on close
            volume_file.flush()
            # Be tolerant to IO implementations that do not support fileno()
            try:
                fileno = volume_file.fileno()
            except IOError:
                LOG.debug("volume_file does not support fileno() so skipping "
                          "fsync()")
            else:
                os.fsync(fileno)
            # Restoring a backup to a volume can take some time. Yield so other
            # threads can run, allowing for among other things the service
            # status to be updated
            eventlet.sleep(0)
        LOG.debug('v1 volume backup restore of %s finished.',
                  backup_id)
    def restore(self, backup, volume_id, volume_file, volume_is_new):
        """Restore the given volume backup from backup repository.

        Raises BackupRestoreCancel on any backup status change.

        :param backup: the backup to restore
        :param volume_id: destination volume id
        :param volume_file: open file-like object for the destination volume
        :param volume_is_new: True if the destination volume is freshly
            created
        """
        backup_id = backup['id']
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('starting restore of backup %(object_prefix)s '
                  'container: %(container)s, '
                  'to %(new)s volume %(volume_id)s, '
                  'backup: %(backup_id)s.',
                  {
                      'object_prefix': object_prefix,
                      'container': container,
                      'volume_id': volume_id,
                      'backup_id': backup_id,
                      'new': 'new' if volume_is_new else 'existing',
                  })
        metadata = self._read_metadata(backup)
        metadata_version = metadata['version']
        LOG.debug('Restoring backup version %s', metadata_version)
        try:
            # An unknown version makes .get() return None, and getattr with a
            # None attribute name raises TypeError, caught just below.
            restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
                metadata_version))
        except TypeError:
            err = (_('No support to restore backup version %s')
                   % metadata_version)
            raise exception.InvalidBackup(reason=err)
        # Build a list of backups based on parent_id. A full backup
        # will be the last one in the list.
        backup_list = []
        backup_list.append(backup)
        current_backup = backup
        while current_backup.parent_id:
            prev_backup = objects.Backup.get_by_id(self.context,
                                                   current_backup.parent_id)
            backup_list.append(prev_backup)
            current_backup = prev_backup
        # Do a full restore first, then layer the incremental backups
        # on top of it in order.
        index = len(backup_list) - 1
        while index >= 0:
            backup1 = backup_list[index]
            index = index - 1
            metadata = self._read_metadata(backup1)
            restore_func(backup1, volume_id, metadata, volume_file,
                         volume_is_new, backup)
            volume_meta = metadata.get('volume_meta', None)
            try:
                if volume_meta:
                    self.put_metadata(volume_id, volume_meta)
                else:
                    LOG.debug("No volume metadata in this backup.")
            except exception.BackupMetadataUnsupportedVersion:
                msg = _("Metadata restore failed due to incompatible version.")
                LOG.error(msg)
                raise exception.BackupOperationError(msg)
        LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
                  {'backup_id': backup_id, 'volume_id': volume_id})
def delete_backup(self, backup):
"""Delete the given backup."""
container = backup['container']
object_prefix = backup['service_metadata']
LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
'prefix: %(pre)s.',
{'id': backup['id'],
'cont': container,
'pre': object_prefix})
if container is not None and object_prefix is not None:
object_names = []
try:
object_names = self._generate_object_names(backup)
except Exception:
LOG.warning('Error while listing objects, continuing'
' with delete.')
for object_name in object_names:
self.delete_object(container, object_name)
LOG.debug('deleted object: %(object_name)s'
' in container: %(container)s.',
{
'object_name': object_name,
'container': container
})
# Deleting a backup's objects can take some time.
# Yield so other threads can run
eventlet.sleep(0)
LOG.debug('delete %s finished.', backup['id'])
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/driver.py 0000664 0000000 0000000 00000042756 15131732575 0022764 0 ustar 00root root 0000000 0000000 # Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from cinder.db import base
from cinder import exception
from cinder.i18n import _
backup_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(backup_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
    """Collects and restores volume metadata for backups.

    On backup, serializes the volume's base DB fields, user metadata and
    Glance metadata into a single versioned JSON document.  On restore,
    selectively writes fields back according to the document's version.
    """

    TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
    TYPE_TAG_VOL_META = 'volume-metadata'
    TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'

    def __init__(self, context):
        super().__init__()
        self.context = context
        self._key_mgr = None

    @staticmethod
    def _is_serializable(value):
        """Returns True if value is serializable."""
        try:
            jsonutils.dumps(value)
        except TypeError:
            LOG.info("Value with type=%s is not serializable",
                     type(value))
            return False
        return True

    def _save_vol_base_meta(self, container, volume_id):
        """Save base volume metadata to container.

        This will fetch all fields from the db Volume object for volume_id and
        save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_BASE_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        meta = self.db.volume_get(self.context, volume_id)
        if meta:
            container[type_tag] = {}
            for key, value in meta:
                # Exclude fields that are "not JSON serializable"
                if not self._is_serializable(value):
                    LOG.info("Unable to serialize field '%s' - excluding "
                             "from backup", key)
                    continue
                # NOTE(abishop): The backup manager is now responsible for
                # ensuring a copy of the volume's encryption key ID is
                # retained in case the volume is deleted. Yes, this means
                # the backup's volume base metadata now stores the volume's
                # original encryption key ID, which affects how things are
                # handled when backups are restored. The backup manager
                # handles this, too.
                container[type_tag][key] = value
            LOG.debug("Completed fetching metadata type '%s'", type_tag)
        else:
            LOG.debug("No metadata type '%s' available", type_tag)

    def _save_vol_meta(self, container, volume_id):
        """Save volume metadata to container.

        This will fetch all fields from the db VolumeMetadata object for
        volume_id and save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        meta = self.db.volume_metadata_get(self.context, volume_id)
        if meta:
            container[type_tag] = {}
            for entry in meta:
                # Exclude fields that are "not JSON serializable"
                if not self._is_serializable(meta[entry]):
                    LOG.info("Unable to serialize field '%s' - excluding "
                             "from backup", entry)
                    continue
                container[type_tag][entry] = meta[entry]
            LOG.debug("Completed fetching metadata type '%s'", type_tag)
        else:
            LOG.debug("No metadata type '%s' available", type_tag)

    def _save_vol_glance_meta(self, container, volume_id):
        """Save volume Glance metadata to container.

        This will fetch all fields from the db VolumeGlanceMetadata object for
        volume_id and save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_GLANCE_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        try:
            meta = self.db.volume_glance_metadata_get(self.context, volume_id)
            if meta:
                container[type_tag] = {}
                for entry in meta:
                    # Exclude fields that are "not JSON serializable"
                    if not self._is_serializable(entry.value):
                        LOG.info("Unable to serialize field '%s' - "
                                 "excluding from backup", entry)
                        continue
                    container[type_tag][entry.key] = entry.value
            LOG.debug("Completed fetching metadata type '%s'", type_tag)
        except exception.GlanceMetadataNotFound:
            LOG.debug("No metadata type '%s' available", type_tag)

    @staticmethod
    def _filter(metadata, fields, excludes=None):
        """Returns set of metadata restricted to required fields.

        If fields is empty list, the full set is returned.

        :param metadata: master set of metadata
        :param fields: list of fields we want to extract
        :param excludes: fields to be excluded
        :returns: filtered metadata
        """
        if not fields:
            return metadata

        if not excludes:
            excludes = []

        subset = {}
        for field in fields:
            if field in metadata and field not in excludes:
                subset[field] = metadata[field]
            else:
                LOG.debug("Excluding field '%s'", field)

        return subset

    def _restore_vol_base_meta(self, metadata, volume_id, fields):
        """Restore values to Volume object for provided fields."""
        LOG.debug("Restoring volume base metadata")

        excludes = []

        # Ignore unencrypted backups.
        key = 'encryption_key_id'
        if key in fields and key in metadata and metadata[key] is not None:
            self._restore_vol_encryption_meta(volume_id,
                                              metadata['volume_type_id'])

        # NOTE(dosaboy): if the target volume looks like it was auto-created
        # as part of this restore operation and we have a name to restore
        # then apply the name to the target volume. However, if that target
        # volume already existed and it has a name or we do not have a name to
        # restore, then ignore this key. This is intended to be a less drastic
        # solution than commit 7ee80f7.
        key = 'display_name'
        if key in fields and key in metadata:
            target_vol = self.db.volume_get(self.context, volume_id)
            name = target_vol.get(key, '')
            if (not metadata.get(key) or name and
                    not name.startswith('restore_backup_')):
                excludes.append(key)
                excludes.append('display_description')

        metadata = self._filter(metadata, fields, excludes=excludes)
        self.db.volume_update(self.context, volume_id, metadata)

    def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
        """Restores the volume_type_id for encryption if needed.

        Only allow restoration of an encrypted backup if the destination
        volume has the same volume type as the source volume. Otherwise
        encryption will not work. If volume types are already the same,
        no action is needed.
        """
        dest_vol = self.db.volume_get(self.context, volume_id)
        if dest_vol['volume_type_id'] != src_volume_type_id:
            LOG.debug("Volume type id's do not match.")
            # If the volume types do not match, and the destination volume
            # does not have a volume type, force the destination volume
            # to have the encrypted volume type, provided it still exists.
            if dest_vol['volume_type_id'] is None:
                try:
                    self.db.volume_type_get(
                        self.context, src_volume_type_id)
                except exception.VolumeTypeNotFound:
                    LOG.debug("Volume type of source volume has been "
                              "deleted. Encrypted backup restore has "
                              "failed.")
                    msg = _("The source volume type '%s' is not "
                            "available.") % (src_volume_type_id)
                    raise exception.EncryptedBackupOperationFailed(msg)
                # Update dest volume with src volume's volume_type_id.
                LOG.debug("The volume type of the destination volume "
                          "will become the volume type of the source "
                          "volume.")
                self.db.volume_update(self.context, volume_id,
                                      {'volume_type_id': src_volume_type_id})
            else:
                # Volume type id's do not match, and destination volume
                # has a volume type. Throw exception.
                LOG.warning("Destination volume type is different from "
                            "source volume type for an encrypted volume. "
                            "Encrypted backup restore has failed.")
                msg = (_("The source volume type '%(src)s' is different "
                         "than the destination volume type '%(dest)s'.") %
                       {'src': src_volume_type_id,
                        'dest': dest_vol['volume_type_id']})
                raise exception.EncryptedBackupOperationFailed(msg)

    def _restore_vol_meta(self, metadata, volume_id, fields):
        """Restore values to VolumeMetadata object for provided fields."""
        LOG.debug("Restoring volume metadata")
        metadata = self._filter(metadata, fields)
        self.db.volume_metadata_update(self.context, volume_id, metadata, True)

    def _restore_vol_glance_meta(self, metadata, volume_id, fields):
        """Restore values to VolumeGlanceMetadata object for provided fields.

        First delete any existing metadata then save new values.
        """
        LOG.debug("Restoring volume glance metadata")
        metadata = self._filter(metadata, fields)
        self.db.volume_glance_metadata_delete_by_volume(self.context,
                                                        volume_id)
        for key, value in metadata.items():
            self.db.volume_glance_metadata_create(self.context,
                                                  volume_id,
                                                  key, value)
        # Now mark the volume as bootable
        self.db.volume_update(self.context, volume_id,
                              {'bootable': True})

    def _v1_restore_factory(self):
        """All metadata is backed up but we selectively restore.

        Returns a dictionary of the form:

            {<type tag>: (<restore function>, <fields to restore>)}

        Empty field list indicates that all backed up fields should be
        restored.
        """
        return {self.TYPE_TAG_VOL_BASE_META:
                (self._restore_vol_base_meta,
                 ['display_name', 'display_description']),
                self.TYPE_TAG_VOL_META:
                (self._restore_vol_meta, []),
                self.TYPE_TAG_VOL_GLANCE_META:
                (self._restore_vol_glance_meta, [])}

    def _v2_restore_factory(self):
        """All metadata is backed up but we selectively restore.

        Returns a dictionary of the form:

            {<type tag>: (<restore function>, <fields to restore>)}

        Empty field list indicates that all backed up fields should be
        restored.
        """
        return {self.TYPE_TAG_VOL_BASE_META:
                (self._restore_vol_base_meta,
                 ['display_name', 'display_description',
                  'encryption_key_id']),
                self.TYPE_TAG_VOL_META:
                (self._restore_vol_meta, []),
                self.TYPE_TAG_VOL_GLANCE_META:
                (self._restore_vol_glance_meta, [])}

    def get(self, volume_id):
        """Get volume metadata.

        Returns a json-encoded dict containing all metadata and the restore
        version i.e. the version used to decide what actually gets restored
        from this container when doing a backup restore.
        """
        container = {'version': CONF.backup_metadata_version}
        self._save_vol_base_meta(container, volume_id)
        self._save_vol_meta(container, volume_id)
        self._save_vol_glance_meta(container, volume_id)

        # The 'version' key guarantees the container is non-empty; the
        # guard is kept for defensiveness only.
        if container:
            return jsonutils.dumps(container)
        else:
            return None

    def put(self, volume_id, json_metadata):
        """Restore volume metadata to a volume.

        The json container should contain a version that is supported here.
        """
        meta_container = jsonutils.loads(json_metadata)
        version = meta_container['version']
        if version == 1:
            factory = self._v1_restore_factory()
        elif version == 2:
            factory = self._v2_restore_factory()
        else:
            msg = (_("Unsupported backup metadata version (%s)") % (version))
            raise exception.BackupMetadataUnsupportedVersion(msg)

        # Iterate items() and unpack, instead of the previous loop that
        # shadowed the builtin `type` and did repeated dict lookups.
        for meta_type, (func, fields) in factory.items():
            if meta_type in meta_container:
                func(meta_container[meta_type], volume_id, fields)
            else:
                LOG.debug("No metadata of type '%s' to restore", meta_type)
class BackupDriver(base.Base, metaclass=abc.ABCMeta):
    """Abstract base class that all backup drivers must implement.

    Concrete drivers implement backup/restore/delete against a specific
    backup repository and may override the record import/export hooks.
    """

    def __init__(self, context):
        super().__init__()
        self.context = context
        # Shared helper that serializes/restores volume metadata documents.
        self.backup_meta_api = BackupMetadataAPI(context)

        # This flag indicates if backup driver supports force
        # deletion. So it should be set to True if the driver that inherits
        # from BackupDriver supports the force deletion function.
        self.support_force_delete = False

    def get_metadata(self, volume_id):
        # Returns the volume's metadata as a versioned JSON document
        # (see BackupMetadataAPI.get).
        return self.backup_meta_api.get(volume_id)

    def put_metadata(self, volume_id, json_metadata):
        # Restores a JSON metadata document produced by get_metadata().
        self.backup_meta_api.put(volume_id, json_metadata)

    @abc.abstractmethod
    def backup(self, backup, volume_file, backup_metadata=False):
        """Start a backup of a specified volume.

        Some I/O operations may block greenthreads, so in order to prevent
        starvation parameter volume_file will be a proxy that will execute
        all methods in native threads, so the method implementation doesn't
        need to worry about that.
        """
        return

    @abc.abstractmethod
    def restore(self, backup, volume_id, volume_file, volume_is_new):
        """Restore a saved backup.

        Some I/O operations may block greenthreads, so in order to prevent
        starvation parameter volume_file will be a proxy that will execute
        all methods in native threads, so the method implementation doesn't
        need to worry about that.

        May raise BackupRestoreCancel to indicate that the restoration of a
        volume has been aborted by changing the backup status.
        """
        return

    @abc.abstractmethod
    def delete_backup(self, backup):
        """Delete a saved backup."""
        return

    def export_record(self, backup):
        """Export driver specific backup record information.

        If backup backend needs additional driver specific information to
        import backup record back into the system it must overwrite this
        method and return it here as a dictionary so it can be serialized
        into a string.

        Default backup driver implementation has no extra information.

        :param backup: backup object to export
        :returns: driver_info - dictionary with extra information
        """
        return {}

    def import_record(self, backup, driver_info):
        """Import driver specific backup record information.

        If backup backend needs additional driver specific information to
        import backup record back into the system it must overwrite this
        method since it will be called with the extra information that was
        provided by export_record when exporting the backup.

        Default backup driver implementation does nothing since it didn't
        export any specific data in export_record.

        :param backup: backup object to export
        :param driver_info: dictionary with driver specific backup record
                            information
        :returns: nothing
        """
        return

    def check_for_setup_error(self):
        """Method for checking if backup backend is successfully installed.

        Refer to
        :obj:`cinder.interface.backup_driver.BackupDriver.check_for_setup_error`
        for additional information.
        """
        return
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/ 0000775 0000000 0000000 00000000000 15131732575 0022557 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0024656 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/ceph.py 0000664 0000000 0000000 00000202510 15131732575 0024050 0 ustar 00root root 0000000 0000000 # Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ceph Backup Service Implementation.
This driver supports backing up volumes of any type to a Ceph object store. It
is also capable of detecting whether the volume to be backed up is a Ceph RBD
volume and, if so, attempts to perform incremental/differential backups.
Support is also included for the following in the case of a source volume being
a Ceph RBD volume:
* backing up within the same Ceph pool (not recommended)
* backing up between different Ceph pools
* backing up between different Ceph clusters
At the time of writing, differential backup support in Ceph/librbd was quite
new so this driver accounts for this by first attempting differential backup
and falling back to full backup/copy if the former fails. It is recommended
that you upgrade to Ceph Dumpling (>= v0.67) or above to get the best results.
If incremental backups are used, multiple backups of the same volume are stored
as snapshots so that minimal space is consumed in the object store and
restoring the volume takes a far reduced amount of time compared to a full
copy.
Note that Cinder supports restoring to a new volume or the original volume the
backup was taken from. For the latter case, a full copy is enforced since this
was deemed the safest action to take. It is therefore recommended to always
restore to a new volume (default).
"""
import fcntl
import json
import os
import re
import subprocess
import tempfile
import textwrap
import time
from typing import Dict, List, Optional, Tuple
import eventlet
from os_brick.initiator import linuxrbd
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder import utils
import cinder.volume.drivers.rbd as rbd_driver
from cinder.volume import volume_utils
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
LOG = logging.getLogger(__name__)
service_opts = [
cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf',
help='Ceph configuration file to use.'),
cfg.StrOpt('backup_ceph_user', default='cinder',
help='The Ceph user to connect with. Default here is to use '
'the same user as for Cinder volumes. If not using cephx '
'this should be set to None.'),
cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128),
help='The chunk size, in bytes, that a backup is broken into '
'before transfer to the Ceph object store.'),
cfg.StrOpt('backup_ceph_pool', default='backups',
help='The Ceph pool where volume backups are stored.'),
cfg.IntOpt('backup_ceph_stripe_unit', default=0,
help='RBD stripe unit to use when creating a backup image.'),
cfg.IntOpt('backup_ceph_stripe_count', default=0,
help='RBD stripe count to use when creating a backup image.'),
cfg.BoolOpt('backup_ceph_image_journals', default=False,
help='If True, apply JOURNALING and EXCLUSIVE_LOCK feature '
'bits to the backup RBD objects to allow mirroring'),
cfg.IntOpt('backup_ceph_max_snapshots', default=0,
help=textwrap.dedent("""\
Number of the most recent snapshots to keep.
0 indicates to keep an unlimited number of snapshots.
Configuring this option can save disk space by only keeping
a limited number of snapshots on the source volume storage.
However, if a user deletes all incremental backups which
still have snapshots on the source storage, the next
incremental backup will automatically become a full backup
as no common snapshot exists anymore.
""")),
cfg.BoolOpt('restore_discard_excess_bytes', default=True,
help='If True, always discard excess bytes when restoring '
'volumes i.e. pad with zeroes.')
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
class VolumeMetadataBackup(object):
    """Stores and retrieves a backup's volume metadata as a rados object."""

    def __init__(self, client: 'rados.Rados', backup_id: str):
        self._client: 'rados.Rados' = client
        self._backup_id: str = backup_id

    @property
    def name(self) -> str:
        return "backup.%s.meta" % self._backup_id

    def _meta_obj(self):
        """Return a tpool-proxied rados.Object for this backup's metadata."""
        return eventlet.tpool.Proxy(
            rados.Object(self._client.ioctx, self.name))

    @property
    def exists(self) -> bool:
        return self._exists(self._meta_obj())

    def _exists(self, obj) -> bool:
        try:
            obj.stat()
        except rados.ObjectNotFound:
            return False
        return True

    def set(self, json_meta: str) -> None:
        """Write JSON metadata to a new object.

        This should only be called once per backup. Raises
        VolumeMetadataBackupExists if the object already exists.
        """
        meta_obj = self._meta_obj()
        if self._exists(meta_obj):
            msg = _("Metadata backup object '%s' already exists") % self.name
            raise exception.VolumeMetadataBackupExists(msg)

        meta_obj.write(json_meta.encode('utf-8'))

    def get(self) -> Optional[str]:
        """Get metadata backup object.

        Returns None if the object does not exist.
        """
        meta_obj = self._meta_obj()
        if not self._exists(meta_obj):
            LOG.debug("Metadata backup object %s does not exist", self.name)
            return None

        return meta_obj.read().decode('utf-8')

    def remove_if_exists(self) -> None:
        try:
            self._meta_obj().remove()
        except rados.ObjectNotFound:
            LOG.debug("Metadata backup object '%s' not found - ignoring",
                      self.name)
@interface.backupdriver
class CephBackupDriver(driver.BackupDriver):
"""Backup Cinder volumes to Ceph Object Store.
This class enables backing up Cinder volumes to a Ceph object store.
Backups may be stored in their own pool or even cluster. Store location is
defined by the Ceph conf file and service config options supplied.
If the source volume is itself an RBD volume, the backup will be performed
using incremental differential backups which *should* give a performance
gain.
"""
    def __init__(self, context, execute=None):
        """Initialize the Ceph backup driver.

        :param context: request context
        :param execute: optional command executor; defaults to utils.execute
        """
        super().__init__(context)
        self.rbd = rbd
        self.rados = rados
        self.chunk_size = CONF.backup_ceph_chunk_size
        self._execute = execute or utils.execute

        # Striping settings are honoured only when librbd advertises
        # STRIPINGV2; otherwise configured values are ignored with a log.
        self.rbd_stripe_count = 0
        self.rbd_stripe_unit = 0
        if self._supports_stripingv2:
            self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
            self.rbd_stripe_count = CONF.backup_ceph_stripe_count
        elif (CONF.backup_ceph_stripe_unit != 0 or
              CONF.backup_ceph_stripe_count != 0):
            LOG.info("RBD striping not supported - ignoring configuration "
                     "settings for rbd striping.")

        self._ceph_backup_user = CONF.backup_ceph_user
        self._ceph_backup_pool = CONF.backup_ceph_pool
        self._ceph_backup_conf = CONF.backup_ceph_conf
        self.message_api = message_api.API()
    @staticmethod
    def get_driver_options() -> list:
        """Return the list of config options this driver consumes."""
        return service_opts
@staticmethod
def _validate_string_args(*args: str) -> bool:
"""Ensure all args are non-None and non-empty."""
return all(args)
@staticmethod
def _ceph_args(user: str, conf: Optional[str] = None,
pool: Optional[str] = None) -> List[str]:
"""Create default ceph args for executing rbd commands.
If no --conf is provided, rbd will look in the default locations e.g.
/etc/ceph/ceph.conf
"""
# Make sure user arg is valid since rbd command may not fail if
# invalid/no user provided, resulting in unexpected behaviour.
if not CephBackupDriver._validate_string_args(user):
raise exception.BackupInvalidCephArgs(_("invalid user '%s'") %
user)
args = ['--id', user]
if conf:
args.extend(['--conf', conf])
if pool:
args.extend(['--pool', pool])
return args
    @property
    def _supports_layering(self) -> bool:
        """Determine if copy-on-write is supported by our version of librbd."""
        # Feature constants only exist in librbd builds that support them.
        return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
    @property
    def _supports_stripingv2(self) -> bool:
        """Determine if striping is supported by our version of librbd."""
        # Feature constants only exist in librbd builds that support them.
        return hasattr(self.rbd, 'RBD_FEATURE_STRIPINGV2')
    @property
    def _supports_exclusive_lock(self) -> bool:
        """Determine if exclusive-lock is supported by librbd."""
        # Feature constants only exist in librbd builds that support them.
        return hasattr(self.rbd, 'RBD_FEATURE_EXCLUSIVE_LOCK')
    @property
    def _supports_journaling(self) -> bool:
        """Determine if journaling is supported by our version of librbd."""
        # Feature constants only exist in librbd builds that support them.
        return hasattr(self.rbd, 'RBD_FEATURE_JOURNALING')
    @property
    def _supports_fast_diff(self) -> bool:
        """Determine if fast-diff is supported by our version of librbd."""
        # Feature constants only exist in librbd builds that support them.
        return hasattr(self.rbd, 'RBD_FEATURE_FAST_DIFF')
    def _get_rbd_support(self) -> Tuple[bool, int]:
        """Determine RBD features supported by our version of librbd.

        :returns: (old_format, features) where old_format is True only when
                  no v2-format feature is usable, and features is the
                  feature bitmask to pass when creating backup images
        """
        old_format = True
        features = 0
        if self._supports_layering:
            old_format = False
            features |= self.rbd.RBD_FEATURE_LAYERING
        if self._supports_stripingv2:
            old_format = False
            features |= self.rbd.RBD_FEATURE_STRIPINGV2

        if CONF.backup_ceph_image_journals:
            LOG.debug("RBD journaling supported by backend and requested "
                      "via config. Enabling it together with "
                      "exclusive-lock")
            old_format = False
            # Journaling requires exclusive-lock, so both bits are set.
            features |= (self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK |
                         self.rbd.RBD_FEATURE_JOURNALING)

        # NOTE(christian_rohmann): Check for fast-diff support and enable it
        if self._supports_fast_diff:
            LOG.debug("RBD also supports fast-diff, enabling it "
                      "together with exclusive-lock and object-map")
            old_format = False
            # fast-diff depends on object-map, which in turn depends on
            # exclusive-lock.
            features |= (self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK |
                         self.rbd.RBD_FEATURE_OBJECT_MAP |
                         self.rbd.RBD_FEATURE_FAST_DIFF)

        return (old_format, features)
def check_for_setup_error(self) -> None:
"""Returns an error if prerequisites aren't met."""
if rados is None or rbd is None:
msg = _('rados and rbd python libraries not found')
raise exception.BackupDriverException(reason=msg)
for attr in ['backup_ceph_user', 'backup_ceph_pool',
'backup_ceph_conf']:
val = getattr(CONF, attr)
if not val:
raise exception.InvalidConfigurationValue(option=attr,
value=val)
# NOTE: Checking connection to ceph
# RADOSClient __init__ method invokes _connect_to_rados
# so no need to check for self.rados.Error here.
with rbd_driver.RADOSClient(self, self._ceph_backup_pool):
pass
# NOTE(christian_rohmann): Check features required for journaling
if CONF.backup_ceph_image_journals:
if not self._supports_exclusive_lock and self._supports_journaling:
LOG.error("RBD journaling not supported - unable to "
"support per image mirroring in backup pool")
raise exception.BackupInvalidCephArgs(
_("Image Journaling set but RBD backend does "
"not support journaling")
)
    def _connect_to_rados(self,
                          pool: Optional[str] = None) -> Tuple['rados.Rados',
                                                               'rados.Ioctx']:
        """Establish connection to the backup Ceph cluster.

        :param pool: pool to open; defaults to the configured backup pool
        :returns: (client, ioctx) tuple; the caller owns both handles and
                  must release them via _disconnect_from_rados()
        """
        client = eventlet.tpool.Proxy(self.rados.Rados(
            rados_id=self._ceph_backup_user,
            conffile=self._ceph_backup_conf))
        try:
            client.connect()
            pool_to_open = pool or self._ceph_backup_pool
            ioctx = client.open_ioctx(pool_to_open)
            return client, ioctx
        except self.rados.Error:
            # shutdown cannot raise an exception
            client.shutdown()
            raise
    @staticmethod
    def _disconnect_from_rados(client: 'rados.Rados',
                               ioctx: 'rados.Ioctx') -> None:
        """Terminate connection with the backup Ceph cluster.

        Counterpart of _connect_to_rados(); releases both handles.
        """
        # closing an ioctx cannot raise an exception
        ioctx.close()
        client.shutdown()
@staticmethod
def _format_base_name(service_metadata: str) -> str:
base_name = json.loads(service_metadata)["base"]
return base_name
    @staticmethod
    def _get_backup_base_name(
            volume_id: str,
            backup: Optional['objects.Backup'] = None) -> str:
        """Return name of base image used for backup.

        Incremental backups use a new base name so we support old and new
        style format.

        :param volume_id: id of the source volume
        :param backup: backup object; when None the legacy per-volume base
                       name is returned
        """
        if not backup:
            return "volume-%s.backup.base" % volume_id

        if backup.service_metadata:
            return CephBackupDriver._format_base_name(backup.service_metadata)

        # 'parent' field will only be present in incremental backups. This is
        # filled by cinder-api
        if backup.parent:
            # Old backups don't have the base name in the service_metadata,
            # so we use the default RBD backup base
            if backup.parent.service_metadata:
                service_metadata = backup.parent.service_metadata
                base_name = CephBackupDriver._format_base_name(
                    service_metadata)
            else:
                base_name = "volume-%s.backup.base" % volume_id
            return base_name

        # Full backup with no service metadata: per-backup unique base name.
        return "volume-%s.backup.%s" % (volume_id, backup.id)
    def _discard_bytes(self,
                       volume: linuxrbd.RBDVolumeIOWrapper,
                       offset: int,
                       length: int) -> None:
        """Trim length bytes from offset.

        If the volume is an rbd do a discard() otherwise assume it is a file
        and pad with zeroes.
        """
        if length:
            LOG.debug("Discarding %(length)s bytes from offset %(offset)s",
                      {'length': length, 'offset': offset})
            if self._file_is_rbd(volume):
                # RBD discard is issued in chunks of at most 2GiB - 1 bytes
                # per call, followed by the remainder.
                limit = 2 * units.Gi - 1
                chunks = int(length / limit)
                for chunk in range(0, chunks):
                    eventlet.tpool.Proxy(volume.rbd_image).discard(
                        offset + chunk * limit, limit)
                rem = int(length % limit)
                if rem:
                    eventlet.tpool.Proxy(volume.rbd_image).discard(
                        offset + chunks * limit, rem)
            else:
                # Regular file: overwrite the region with zero buffers of
                # chunk_size, then the remainder.
                # NOTE(review): this branch writes at the file's CURRENT
                # position and ignores `offset` - presumably callers have
                # already positioned the file there; confirm against callers.
                zeroes = bytearray(self.chunk_size)
                chunks = int(length / self.chunk_size)
                for chunk in range(0, chunks):
                    LOG.debug("Writing zeroes chunk %d", chunk)
                    volume.write(zeroes)
                    volume.flush()
                rem = int(length % self.chunk_size)
                if rem:
                    zeroes = bytearray(rem)
                    volume.write(zeroes)
                    volume.flush()
    def _transfer_data(self,
                       src: linuxrbd.RBDVolumeIOWrapper,
                       src_name: str,
                       dest: linuxrbd.RBDVolumeIOWrapper,
                       dest_name: str,
                       length: int,
                       discard_zeros: bool = False) -> None:
        """Transfer data between files (Python IO objects).

        Copies `length` bytes from src to dest in chunk_size pieces.  When
        discard_zeros is True, all-zero chunks are not written.  If src
        reaches EOF early, excess destination bytes are discarded (when
        restore_discard_excess_bytes is enabled) and the transfer stops.
        """
        LOG.debug("Transferring data between '%(src)s' and '%(dest)s'",
                  {'src': src_name, 'dest': dest_name})

        chunks = int(length / self.chunk_size)
        LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred",
                  {'chunks': chunks, 'bytes': self.chunk_size})

        for chunk in range(0, chunks):
            before = time.time()
            data = src.read(self.chunk_size)

            # If we have reach end of source, discard any extraneous bytes from
            # destination volume if trim is enabled and stop writing.
            if data == b'':
                if CONF.restore_discard_excess_bytes:
                    self._discard_bytes(dest, dest.tell(),
                                        length - dest.tell())
                return

            if (discard_zeros and volume_utils.is_all_zero(data)):
                # NOTE(review): skipping a zero chunk does not advance
                # dest's position here - verify the destination wrapper
                # accounts for this, otherwise subsequent writes would land
                # at the wrong offset.
                action = "Discarded"
            else:
                dest.write(data)
                dest.flush()
                action = "Transferred"

            delta = (time.time() - before)
            # Throughput of this chunk in KiB/s, for the debug log below.
            rate = (self.chunk_size / delta) / 1024
            LOG.debug("%(action)s chunk %(chunk)s of %(chunks)s (%(rate)dK/s)",
                      {'action': action,
                       'chunk': chunk + 1,
                       'chunks': chunks,
                       'rate': rate})

        rem = int(length % self.chunk_size)
        if rem:
            LOG.debug("Transferring remaining %s bytes", rem)
            data = src.read(rem)
            if data == b'':
                if CONF.restore_discard_excess_bytes:
                    self._discard_bytes(dest, dest.tell(), rem)
            else:
                dest.write(data)
                dest.flush()
def _create_base_image(self,
name: str,
size: int,
rados_client: 'rados.Rados') -> None:
"""Create a base backup image.
This will be the base image used for storing differential exports.
"""
LOG.debug("Creating base image '%s'", name)
old_format, features = self._get_rbd_support()
eventlet.tpool.Proxy(self.rbd.RBD()).create(
ioctx=rados_client.ioctx,
name=name,
size=size,
old_format=old_format,
features=features,
stripe_unit=self.rbd_stripe_unit,
stripe_count=self.rbd_stripe_count)
    def _delete_backup_snapshot(self,
                                rados_client: 'rados.Rados',
                                base_name: Optional[str],
                                backup_id: str) -> Tuple[Optional[str], int]:
        """Delete snapshot associated with this backup if one exists.

        A backup should have at most ONE associated snapshot.

        This is required before attempting to delete the base image. The
        snapshot on the original volume can be left as it will be purged when
        the volume is deleted.

        Returns tuple(deleted_snap_name, num_of_remaining_snaps).
        """
        remaining_snaps = 0
        base_rbd = eventlet.tpool.Proxy(self.rbd.Image(rados_client.ioctx,
                                                       base_name))
        try:
            snap_name = self._get_backup_snap_name(base_rbd, base_name,
                                                   backup_id)
            if snap_name:
                LOG.debug("Deleting backup snapshot='%s'", snap_name)
                base_rbd.remove_snap(snap_name)
            else:
                LOG.debug("No backup snapshot to delete")

            # Now check whether any snapshots remain on the base image
            backup_snaps = self.get_backup_snaps(base_rbd)
            if backup_snaps:
                remaining_snaps = len(backup_snaps)
        finally:
            # Always release the image handle, even when lookup/removal
            # raises.
            base_rbd.close()

        return snap_name, remaining_snaps
    def _try_delete_base_image(self,
                               backup: 'objects.Backup',
                               base_name: Optional[str] = None) -> None:
        """Try to delete backup RBD image.

        If the rbd image is a base image for incremental backups, it may have
        snapshots. Delete the snapshot associated with backup_id and if the
        image has no more snapshots, delete it. Otherwise return.

        If no base name is provided try normal (full) format then diff format
        image name.

        If a base name is provided but does not exist, ImageNotFound will be
        raised.

        If the image is busy, a number of retries will be performed if
        ImageBusy is received, after which the exception will be propagated to
        the caller.

        :param backup: backup whose base image should be removed.
        :param base_name: explicit base image name; when None the name is
                          derived and the diff-format fallback lookup is
                          enabled.
        """
        retries = 3
        delay = 5
        try_diff_format = False

        volume_id = backup.volume_id
        if base_name is None:
            # No explicit name given: derive it and allow
            # _rbd_image_exists() to fall back to the legacy name format.
            try_diff_format = True

            base_name = self._get_backup_base_name(volume_id, backup=backup)
            LOG.debug("Trying diff format basename='%(basename)s' for "
                      "backup base image of volume %(volume)s.",
                      {'basename': base_name, 'volume': volume_id})

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            rbd_exists, base_name = \
                self._rbd_image_exists(base_name, volume_id, client,
                                       try_diff_format=try_diff_format)
            if not rbd_exists:
                raise self.rbd.ImageNotFound(_("image %s not found") %
                                             base_name)

            # NOTE: the counter is decremented in the finally clause below,
            # so the loop body runs at most retries + 1 times; a successful
            # delete sets retries to 0 so the loop exits afterwards.
            # NOTE(review): the retry log message mentions a %(delay)ss wait
            # but no sleep happens between attempts here — confirm intended.
            while retries >= 0:
                # First delete associated snapshot from base image (if exists)
                snap, rem = self._delete_backup_snapshot(client, base_name,
                                                         backup.id)
                if rem:
                    # Other backups still reference this base image, so it
                    # must be kept.
                    LOG.info(
                        "Backup base image of volume %(volume)s still "
                        "has %(snapshots)s snapshots so skipping base "
                        "image delete.",
                        {'snapshots': rem, 'volume': volume_id})
                    return

                LOG.info("Deleting backup base image='%(basename)s' of "
                         "volume %(volume)s.",
                         {'basename': base_name, 'volume': volume_id})
                # Delete base if no more snapshots
                try:
                    eventlet.tpool.Proxy(self.rbd.RBD()).remove(
                        client.ioctx, base_name)
                except self.rbd.ImageBusy:
                    # Allow a retry if the image is busy
                    if retries > 0:
                        LOG.info("Backup image of volume %(volume)s is "
                                 "busy, retrying %(retries)s more time(s) "
                                 "in %(delay)ss.",
                                 {'retries': retries,
                                  'delay': delay,
                                  'volume': volume_id})
                    else:
                        LOG.error("Max retries reached deleting backup "
                                  "%(basename)s image of volume %(volume)s.",
                                  {'volume': volume_id,
                                   'basename': base_name})
                        raise
                else:
                    LOG.debug("Base backup image='%(basename)s' of volume "
                              "%(volume)s deleted.",
                              {'basename': base_name, 'volume': volume_id})
                    retries = 0
                finally:
                    retries -= 1

            # Since we have deleted the base image we can delete the source
            # volume backup snapshot.
            src_name = volume_id
            if src_name in eventlet.tpool.Proxy(
                    self.rbd.RBD()).list(client.ioctx):
                LOG.debug("Deleting source volume snapshot '%(snapshot)s' "
                          "for backup %(basename)s.",
                          {'snapshot': snap, 'basename': base_name})
                src_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                                              src_name))
                try:
                    src_rbd.remove_snap(snap)
                finally:
                    src_rbd.close()
    def _piped_execute(self, cmd1: list, cmd2: list) -> Tuple[int, bytes]:
        """Pipe output of cmd1 into cmd2.

        Both processes share a temporary file for stderr, whose full
        contents are returned to the caller.

        :param cmd1: producer command argv list.
        :param cmd2: consumer command argv list.
        :returns: tuple (returncode, stderr). cmd1's non-zero return code
                  takes precedence over cmd2's.
        """
        LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1))
        LOG.debug("cmd2='%s'", ' '.join(cmd2))

        with tempfile.TemporaryFile() as errfile:
            try:
                p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                      stderr=errfile,
                                      close_fds=True)
            except OSError as e:
                LOG.error("Pipe1 failed - %s ", e)
                raise

            # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
            # around the case where evenlet.green.subprocess is used which
            # seems to use a non-blocking pipe.
            assert p1.stdout is not None
            flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
            fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)

            try:
                p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                                      stdout=subprocess.PIPE,
                                      stderr=errfile,
                                      close_fds=True)
            except OSError as e:
                LOG.error("Pipe2 failed - %s ", e)
                raise

            # Close our copy of p1's stdout so p1 sees SIGPIPE/EOF correctly
            # once p2 exits.
            p1.stdout.close()

            p2.communicate()
            p1.wait()
            errfile.seek(0)
            px_stderr = errfile.read()

        return p1.returncode or p2.returncode, px_stderr
    def _rbd_diff_transfer(self, src_name: str, src_pool: str,
                           dest_name: str, dest_pool: str,
                           src_user: str, src_conf: Optional[str],
                           dest_user: str, dest_conf: Optional[str],
                           src_snap: Optional[str] = None,
                           from_snap: Optional[str] = None) -> None:
        """Copy only extents changed between two points.

        If no snapshot is provided, the diff extents will be all those changed
        since the rbd volume/base was created, otherwise it will be those
        changed since the snapshot was created.

        Implemented by piping ``rbd export-diff`` into ``rbd import-diff``.

        :param src_name: source image name.
        :param src_pool: source pool name.
        :param dest_name: destination image name.
        :param dest_pool: destination pool name.
        :param src_user: Ceph user for the source side.
        :param src_conf: path to ceph.conf for the source (may be None).
        :param dest_user: Ceph user for the destination side.
        :param dest_conf: path to ceph.conf for the destination (may be None).
        :param src_snap: snapshot on the source to export up to.
        :param from_snap: starting snapshot for the diff (passed to
                          ``--from-snap``).
        :raises exception.BackupRBDOperationFailed: on non-zero exit status.
        """
        LOG.debug("Performing differential transfer from '%(src)s' to "
                  "'%(dest)s'",
                  {'src': src_name, 'dest': dest_name})

        # NOTE(dosaboy): Need to be tolerant of clusters/clients that do
        # not support these operations since at the time of writing they
        # were very new.

        src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool)
        dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool)

        cmd1 = ['rbd', 'export-diff'] + src_ceph_args
        if from_snap is not None:
            cmd1.extend(['--from-snap', from_snap])
        if src_snap:
            path = "%s/%s@%s" % (src_pool, src_name, src_snap)
        else:
            path = "%s/%s" % (src_pool, src_name)

        # '-' streams the diff through stdout/stdin between the commands.
        cmd1.extend([path, '-'])

        cmd2 = ['rbd', 'import-diff'] + dest_ceph_args
        rbd_path = "%s/%s" % (dest_pool, dest_name)
        cmd2.extend(['-', rbd_path])

        ret, stderr = self._piped_execute(cmd1, cmd2)
        if ret:
            msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") %
                   {'ret': ret, 'stderr': stderr})
            LOG.info(msg)
            raise exception.BackupRBDOperationFailed(msg)
def _rbd_image_exists(
self, name: str, volume_id: str,
client: 'rados.Rados',
try_diff_format: Optional[bool] = False) -> Tuple[bool, str]:
"""Return tuple (exists, name)."""
rbds = eventlet.tpool.Proxy(self.rbd.RBD()).list(client.ioctx)
if name not in rbds:
LOG.debug("Image '%s' not found - trying diff format name", name)
if try_diff_format:
name = CephBackupDriver._get_backup_base_name(volume_id)
if name not in rbds:
LOG.debug("Diff format image '%s' not found", name)
return False, name
else:
return False, name
return True, name
def _snap_exists(self,
base_name: str,
snap_name: str,
client: 'rados.Rados') -> bool:
"""Return True if snapshot exists in base image."""
base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
base_name, read_only=True))
try:
snaps = base_rbd.list_snaps()
if snaps is None:
return False
for snap in snaps:
if snap['name'] == snap_name:
return True
finally:
base_rbd.close()
return False
def _full_rbd_backup(self,
container: str,
base_name: str,
length: int) -> Tuple[Optional[str], bool]:
"""Create the base_image for a full RBD backup."""
with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
container)) as client:
self._create_base_image(base_name, length, client)
# Now we just need to return from_snap=None and image_created=True, if
# there is some exception in making backup snapshot, will clean up the
# base image.
return None, True
def _incremental_rbd_backup(
self, backup: 'objects.Backup',
base_name: str, length: int,
source_rbd_image, volume_id: str) -> Tuple[Optional[str], bool]:
"""Select the last snapshot for a RBD incremental backup."""
container = backup.container
last_incr = backup.parent_id
LOG.debug("Trying to perform an incremental backup with container: "
"%(container)s, base_name: %(base)s, source RBD image: "
"%(source)s, volume ID %(volume)s and last incremental "
"backup ID: %(incr)s.",
{'container': container,
'base': base_name,
'source': source_rbd_image,
'volume': volume_id,
'incr': last_incr,
})
with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
container)) as client:
try:
base_rbd = eventlet.tpool.Proxy(
self.rbd.Image(client.ioctx, base_name, read_only=True))
except rbd.ImageNotFound:
msg = (_(
"Can't find base name image %(base)s.") %
{'base': base_name})
LOG.error(msg)
raise exception.BackupRBDOperationFailed(msg)
try:
from_snap = self._get_backup_snap_name(base_rbd,
base_name,
last_incr)
if from_snap is None:
msg = (_(
"Can't find snapshot from parent %(incr)s and "
"base name image %(base)s.") %
{'incr': last_incr, 'base': base_name})
LOG.error(msg)
raise exception.BackupRBDOperationFailed(msg)
finally:
base_rbd.close()
return from_snap, False
    def _backup_rbd(self,
                    backup: 'objects.Backup',
                    volume_file: linuxrbd.RBDVolumeIOWrapper,
                    volume_name: str, length: int) -> Dict[str, str]:
        """Create an incremental or full backup from an RBD image.

        :param backup: backup object to create.
        :param volume_file: RBD I/O wrapper of the source volume.
        :param volume_name: name of the source RBD image.
        :param length: volume size in bytes.
        :returns: dict with 'service_metadata' recording the base image name.
        """
        rbd_user = volume_file.rbd_user
        rbd_pool = volume_file.rbd_pool
        rbd_conf = volume_file.rbd_conf
        source_rbd_image = eventlet.tpool.Proxy(volume_file.rbd_image)
        volume_id = backup.volume_id
        base_name = self._get_backup_base_name(volume_id, backup=backup)
        snaps_to_keep = CONF.backup_ceph_max_snapshots

        # If backup.parent_id is None performs full RBD backup
        if backup.parent_id is None:
            from_snap, image_created = self._full_rbd_backup(backup.container,
                                                             base_name,
                                                             length)
        # Otherwise performs incremental rbd backup
        else:
            # Check if there is at least one snapshot to base an incremental
            # backup on. If not, we cannot perform an incremental backup and
            # fall back to full backup.
            # NOTE(review): this check only happens when snapshot pruning is
            # enabled (snaps_to_keep > 0); otherwise the parent snapshot is
            # assumed to still exist.
            no_source_snaps = snaps_to_keep > 0 and \
                self._get_backup_snap_name(
                    source_rbd_image,
                    base_name,
                    backup.parent_id) is None
            # If true, force full backup
            if no_source_snaps:
                # Unset parent so we get a new backup base name
                backup.parent = None
                # The backup will be a full one, so it has no parent ID.
                # This will mark the backup as a full backup in the database.
                backup.parent_id = None
                backup.save()
                base_name = self.\
                    _get_backup_base_name(volume_id, backup=backup)
                LOG.info("Incremental backup was requested, but there are no "
                         "snapshots present to use as base, "
                         "forcing full backup.")
                # Surface a user-visible message explaining the fallback.
                self.message_api.create(
                    context=self.context,
                    action=message_field.Action.BACKUP_CREATE,
                    resource_uuid=volume_id,
                    detail=message_field.Detail.
                    INCREMENTAL_BACKUP_FORCES_FULL_BACKUP,
                    level="WARNING"
                )
                from_snap, image_created = self._full_rbd_backup(
                    backup.container,
                    base_name,
                    length)
            else:
                # Incremental backup
                rbd_img = source_rbd_image
                from_snap, image_created = \
                    self._incremental_rbd_backup(backup,
                                                 base_name,
                                                 length,
                                                 rbd_img,
                                                 volume_id)

        LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
                  "volume %(volume)s.",
                  {'snap': from_snap, 'volume': volume_id})

        # Snapshot source volume so that we have a new point-in-time
        new_snap = self._get_new_snap_name(backup.id)
        LOG.debug("Creating backup snapshot='%s'", new_snap)
        source_rbd_image.create_snap(new_snap)

        # Attempt differential backup. If this fails, perhaps because librbd
        # or Ceph cluster version does not support it, do a full backup
        # instead.
        #
        # TODO(dosaboy): find a way to determine if the operation is supported
        # rather than brute force approach.
        try:
            before = time.time()
            self._rbd_diff_transfer(volume_name, rbd_pool, base_name,
                                    backup.container,
                                    src_user=rbd_user,
                                    src_conf=rbd_conf,
                                    dest_user=self._ceph_backup_user,
                                    dest_conf=self._ceph_backup_conf,
                                    src_snap=new_snap,
                                    from_snap=from_snap)

            LOG.debug("Differential backup transfer completed in %.4fs",
                      (time.time() - before))

            # only keep last n snapshots and delete older ones
            if snaps_to_keep > 0:
                self._remove_last_snapshots(source_rbd_image, snaps_to_keep)
            else:
                LOG.debug("Not deleting any snapshots because "
                          "all should be kept")
        except exception.BackupRBDOperationFailed:
            with excutils.save_and_reraise_exception():
                LOG.debug("Differential backup transfer failed")

                # Clean up if image was created as part of this operation
                if image_created:
                    self._try_delete_base_image(backup, base_name=base_name)

                # Delete snapshot
                LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
                          "source volume='%(volume)s'.",
                          {'snapshot': new_snap, 'volume': volume_id})
                source_rbd_image.remove_snap(new_snap)

        return {'service_metadata': '{"base": "%s"}' % base_name}
def _remove_last_snapshots(self, source_rbd_image, snaps_to_keep: int):
# only keep last n snapshots and delete older ones for the source
# image provided
snap_list = []
try:
snap_list = self.get_backup_snaps(source_rbd_image)
except Exception as e:
LOG.debug(
"Failed to get snapshot list for %s: %s", source_rbd_image, e
)
remaining_snaps = len(snap_list)
LOG.debug("Snapshot list: %s", snap_list)
if remaining_snaps > snaps_to_keep:
snaps_to_delete = remaining_snaps - snaps_to_keep
LOG.debug(
"There are %s snapshots and %s should be kept, "
"deleting the oldest %s snapshots",
remaining_snaps,
snaps_to_keep,
snaps_to_delete,
)
for i in range(snaps_to_delete):
LOG.debug("Deleting snapshot %s", snap_list[i])
try:
source_rbd_image.remove_snap(snap_list[i]["name"])
except Exception as e:
LOG.debug(
"Failed to delete snapshot %s: %s", snap_list[i], e
)
else:
LOG.debug(
"There are %s snapshots and %s should be kept, "
"not deleting any snapshots",
remaining_snaps,
snaps_to_keep,
)
@staticmethod
def _file_is_rbd(volume_file: linuxrbd.RBDVolumeIOWrapper) -> bool:
"""Returns True if the volume_file is actually an RBD image."""
return hasattr(volume_file, 'rbd_image')
    def _full_backup(self, backup: 'objects.Backup',
                     src_volume: linuxrbd.RBDVolumeIOWrapper,
                     src_name: str, length: int) -> None:
        """Perform a full backup of src volume.

        First creates a base backup image in our backup location then performs
        a chunked copy of all data from source volume to a new backup rbd
        image.

        :param backup: backup object being created.
        :param src_volume: open I/O handle on the source volume.
        :param src_name: name of the source volume.
        :param length: size of the source volume in bytes.
        """
        volume_id = backup.volume_id
        # Backups taken from a snapshot use the legacy base name format;
        # regular backups use the per-backup name format.
        if backup.snapshot_id:
            backup_name = self._get_backup_base_name(volume_id)
        else:
            backup_name = self._get_backup_base_name(volume_id, backup=backup)

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            # First create base backup image
            old_format, features = self._get_rbd_support()
            LOG.debug("Creating backup base image='%(name)s' for volume "
                      "%(volume)s.",
                      {'name': backup_name, 'volume': volume_id})
            eventlet.tpool.Proxy(self.rbd.RBD()).create(
                ioctx=client.ioctx,
                name=backup_name,
                size=length,
                old_format=old_format,
                features=features,
                stripe_unit=self.rbd_stripe_unit,
                stripe_count=self.rbd_stripe_count)

            LOG.debug("Copying data from volume %s.", volume_id)
            dest_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                                           backup_name))
            meta_io_proxy = None
            try:
                rbd_meta = linuxrbd.RBDImageMetadata(dest_rbd,
                                                     backup.container,
                                                     self._ceph_backup_user,
                                                     self._ceph_backup_conf)
                rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta)
                meta_io_proxy = eventlet.tpool.Proxy(rbd_fd)
                self._transfer_data(src_volume, src_name, meta_io_proxy,
                                    backup_name, length)
            finally:
                # Closing the wrapper will close the image as well
                if meta_io_proxy:
                    meta_io_proxy.close()
                else:
                    dest_rbd.close()
@staticmethod
def backup_snapshot_name_pattern() -> str:
"""Returns the pattern used to match backup snapshots.
It is essential that snapshots created for purposes other than backups
do not have this name format.
"""
return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$"
    @classmethod
    def get_backup_snaps(cls, rbd_image: 'rbd.Image',
                         sort: bool = False) -> List[dict]:
        """Get all backup snapshots for the given rbd image.

        NOTE: this call is made public since these snapshots must be deleted
        before the base volume can be deleted.

        :param rbd_image: image whose snapshot list is scanned.
        :param sort: when True, sort results by timestamp, newest first.
        :returns: list of dicts with 'name', 'backup_id' and 'timestamp'.
        """
        snaps = rbd_image.list_snaps()

        backup_snaps = []
        for snap in snaps:
            search_key = cls.backup_snapshot_name_pattern()
            result = re.search(search_key, snap['name'])
            if result:
                # group(0) is the whole snapshot name since the pattern is
                # anchored at both ends.
                backup_snaps.append({'name': result.group(0),
                                     'backup_id': result.group(1),
                                     'timestamp': result.group(2)})

        if sort:
            # Sort into descending order of timestamp (newest first) —
            # reverse=True reverses the ascending string sort.
            backup_snaps.sort(key=lambda x: x['timestamp'], reverse=True)

        return backup_snaps
def _get_new_snap_name(self, backup_id: str) -> str:
return "backup.%s.snap.%s" % (backup_id, time.time())
def _get_backup_snap_name(self, rbd_image: 'rbd.Image',
name: Optional[str], backup_id: str):
"""Return the name of the snapshot associated with backup_id.
The rbd image provided must be the base image used for an incremental
backup.
A backup is only allowed ONE associated snapshot. If more are found,
exception.BackupOperationError is raised.
"""
snaps = self.get_backup_snaps(rbd_image)
LOG.debug("Looking for snapshot of backup base '%s'", name)
if not snaps:
LOG.debug("Backup base '%s' has no snapshots", name)
return None
snaps = [snap['name'] for snap in snaps
if snap['backup_id'] == backup_id]
if not snaps:
LOG.debug("Backup '%s' has no snapshot", backup_id)
return None
if len(snaps) > 1:
msg = (_("Backup should only have one snapshot but instead has %s")
% len(snaps))
raise exception.BackupOperationError(msg)
LOG.debug("Found snapshot '%s'", snaps[0])
return snaps[0]
def _get_volume_size_bytes(self, volume: 'objects.Volume') -> int:
"""Return the size in bytes of the given volume.
Raises exception.InvalidParameterValue if volume size is 0.
"""
if int(volume['size']) == 0:
errmsg = _("Need non-zero volume size")
raise exception.InvalidParameterValue(errmsg)
return int(volume['size']) * units.Gi
    def _backup_metadata(self, backup: 'objects.Backup') -> None:
        """Backup volume metadata.

        NOTE(dosaboy): the metadata we are backing up is obtained from a
                       versioned api so we should not alter it in any way here.
                       We must also be sure that the service that will perform
                       the restore is compatible with version used.

        :param backup: backup whose volume metadata should be saved.
        :raises exception.BackupOperationError: if the metadata backup object
            already exists.
        """
        json_meta = self.get_metadata(backup.volume_id)
        if not json_meta:
            LOG.debug("No metadata to backup for volume %s.", backup.volume_id)
            return

        LOG.debug("Backing up metadata for volume %s.", backup.volume_id)
        try:
            with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                      backup.container)) as client:
                vol_meta_backup = VolumeMetadataBackup(client, backup.id)
                vol_meta_backup.set(json_meta)
        except exception.VolumeMetadataBackupExists as e:
            msg = (_("Failed to backup volume metadata - %s") % e)
            raise exception.BackupOperationError(msg)
    def backup(self, backup: 'objects.Backup',
               volume_file: linuxrbd.RBDVolumeIOWrapper,
               backup_metadata: bool = True) -> dict:
        """Backup volume and metadata (if available) to Ceph object store.

        If the source volume is an RBD we will attempt to do an
        incremental/differential backup, otherwise a full copy is performed.
        If this fails we will attempt to fall back to full copy.

        :param backup: backup object to populate.
        :param volume_file: open I/O handle on the source volume.
        :param backup_metadata: also back up volume metadata when True.
        :returns: dict of updates for the backup (e.g. service_metadata).
        """
        volume = self.db.volume_get(self.context, backup.volume_id)
        updates = {}
        if not backup.container:
            # Default to the configured backup pool.
            backup.container = self._ceph_backup_pool
            backup.save()

        LOG.debug("Starting backup of volume='%s'.", volume.id)

        # Ensure we are at the beginning of the volume
        volume_file.seek(0)
        length = self._get_volume_size_bytes(volume)

        if backup.snapshot_id:
            # Backups from a snapshot always take the full-copy path.
            do_full_backup = True
        elif self._file_is_rbd(volume_file):
            # If volume an RBD, attempt incremental or full backup.
            do_full_backup = False
            LOG.debug("Volume file is RBD: attempting optimized backup")
            try:
                updates = self._backup_rbd(backup, volume_file, volume.name,
                                           length)
            except exception.BackupRBDOperationFailed:
                with excutils.save_and_reraise_exception():
                    self.delete_backup(backup)
        else:
            if backup.parent_id:
                # An incremental backup needs an RBD source volume.
                LOG.debug("Volume file is NOT RBD: can't perform "
                          "incremental backup.")
                raise exception.BackupRBDOperationFailed
            LOG.debug("Volume file is NOT RBD: will do full backup.")
            do_full_backup = True

        if do_full_backup:
            try:
                self._full_backup(backup, volume_file, volume.name, length)
            except exception.BackupOperationError:
                with excutils.save_and_reraise_exception():
                    self.delete_backup(backup)

        if backup_metadata:
            try:
                self._backup_metadata(backup)
            except exception.BackupOperationError:
                with excutils.save_and_reraise_exception():
                    # Cleanup.
                    self.delete_backup(backup)

        LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.",
                  {'backup_id': backup.id, 'volume_id': volume.id})

        return updates
    def _full_restore(self, backup: 'objects.Backup',
                      dest_file,
                      dest_name: str,
                      length: int,
                      volume_is_new: bool,
                      src_snap=None) -> None:
        """Restore volume using full copy i.e. all extents.

        This will result in all extents being copied from source to
        destination.

        :param backup: Backup object describing the backup to be restored.
        :param dest_file: File object of the destination volume.
        :param dest_name: Name of the destination volume.
        :param length: Size of the destination volume in bytes.
        :param volume_is_new: True if the destination volume is new.
        :param src_snap: A string, the name of the restore point snapshot,
            optional, used for incremental backups or RBD backup.
        """
        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            # In case of snapshot_id, the old base name format is used:
            # volume-.backup.base
            # Otherwise, the new base name format is used:
            # volume-.backup-
            # Should match the base name format in _full_backup()
            if backup.snapshot_id:
                backup_name = self._get_backup_base_name(backup.volume_id)
            else:
                backup_name = self._get_backup_base_name(backup.volume_id,
                                                         backup=backup)
            try:
                # Retrieve backup volume
                _src = src_snap
                src_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                                              backup_name,
                                                              snapshot=_src,
                                                              read_only=True))
            # NOTE(review): this catches the module-level rbd exception while
            # most of this driver uses self.rbd — confirm intentional.
            except rbd.ImageNotFound:
                # Check for another base name as a fallback mechanism, in case
                # the backup image is not found under the expected name.
                # The main reason behind having two different base name formats
                # is due to a change in the naming convention at some point in
                # the history of the Cinder project.
                # This approach ensures backward compatibility and makes it
                # possible to restore older backups that were created before
                # the change.
                tried_name = backup_name
                if backup.snapshot_id:
                    backup_name = self._get_backup_base_name(backup.volume_id,
                                                             backup=backup)
                else:
                    backup_name = self._get_backup_base_name(backup.volume_id)
                msg = (_("Backup %(backup_id)s of volume %(volume_id)s"
                         " not found with name %(tried_name)s,"
                         " trying a legacy name %(next_name)s.") %
                       {'backup_id': backup.id,
                        'volume_id': backup.volume_id,
                        'tried_name': tried_name,
                        'next_name': backup_name})
                LOG.info(msg)
                # Second lookup attempt; ImageNotFound here propagates.
                src_rbd = eventlet.tpool.Proxy(self.rbd.Image(
                    client.ioctx,
                    backup_name,
                    snapshot=_src,
                    read_only=True))
            try:
                rbd_meta = linuxrbd.RBDImageMetadata(src_rbd,
                                                     backup.container,
                                                     self._ceph_backup_user,
                                                     self._ceph_backup_conf)
                rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta)
                self._transfer_data(eventlet.tpool.Proxy(rbd_fd), backup_name,
                                    dest_file, dest_name, length,
                                    discard_zeros=volume_is_new)
            finally:
                src_rbd.close()
    def _check_restore_vol_size(self, backup: 'objects.Backup',
                                restore_vol, restore_length: int,
                                src_pool) -> None:
        """Ensure that the restore volume is the correct size.

        If the restore volume was bigger than the backup, the diff restore
        will shrink it to the size of the original backup so we need to
        post-process and resize it back to its expected size.

        :param backup: backup that was restored.
        :param restore_vol: RBD I/O wrapper of the restored volume.
        :param restore_length: expected volume size in bytes.
        :param src_pool: unused here; kept for interface compatibility.
        """
        backup_base = self._get_backup_base_name(backup.volume_id,
                                                 backup=backup)

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            adjust_size = 0
            base_image = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                                             backup_base,
                                                             read_only=True))
            try:
                # Only resize when the restored size differs from the base
                # image size.
                if restore_length != base_image.size():
                    adjust_size = restore_length
            finally:
                base_image.close()

        if adjust_size:
            LOG.debug("Adjusting restore vol size")
            restore_vol.rbd_image.resize(adjust_size)
    def _diff_restore_rbd(self, backup: 'objects.Backup',
                          restore_file,
                          restore_name: str,
                          restore_point: Optional[str],
                          restore_length: int) -> None:
        """Attempt restore rbd volume from backup using diff transfer.

        :param backup: backup to restore from.
        :param restore_file: RBD I/O wrapper of the destination volume.
        :param restore_name: name of the destination RBD image.
        :param restore_point: snapshot on the base image to restore up to.
        :param restore_length: expected destination size in bytes.
        :raises exception.BackupRBDOperationFailed: if the diff transfer
            fails; the caller falls back to a full restore.
        """
        rbd_user = restore_file.rbd_user
        rbd_pool = restore_file.rbd_pool
        rbd_conf = restore_file.rbd_conf
        base_name = self._get_backup_base_name(backup.volume_id,
                                               backup=backup)

        LOG.debug("Attempting incremental restore from base='%(base)s' "
                  "snap='%(snap)s'",
                  {'base': base_name, 'snap': restore_point})
        before = time.time()
        try:
            self._rbd_diff_transfer(base_name, backup.container,
                                    restore_name, rbd_pool,
                                    src_user=self._ceph_backup_user,
                                    src_conf=self._ceph_backup_conf,
                                    dest_user=rbd_user, dest_conf=rbd_conf,
                                    src_snap=restore_point)
        except exception.BackupRBDOperationFailed:
            # Re-raised so _restore_volume() can fall back to a full restore.
            LOG.exception("Differential restore failed, trying full restore")
            raise

        # If the volume we are restoring to is larger than the backup volume,
        # we will need to resize it after the diff import since import-diff
        # appears to shrink the target rbd volume to the size of the original
        # backup volume.
        self._check_restore_vol_size(backup, restore_file, restore_length,
                                     rbd_pool)

        LOG.debug("Restore transfer completed in %.4fs",
                  (time.time() - before))
def _get_restore_point(self,
base_name: str,
backup_id: str) -> Optional[str]:
"""Get restore point snapshot name for incremental backup.
If the backup was not incremental (determined by the fact that the
base has no snapshots/restore points), None is returned. Otherwise, the
restore point associated with backup_id is returned.
"""
with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
self._ceph_backup_pool)) as client:
base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
base_name, read_only=True))
try:
restore_point = self._get_backup_snap_name(base_rbd, base_name,
backup_id)
finally:
base_rbd.close()
return restore_point
@staticmethod
def _rbd_has_extents(rbd_volume) -> bool:
"""Check whether the given rbd volume has extents.
Return True if has extents, otherwise False.
"""
extents = []
def iter_cb(offset, length, exists):
if exists:
extents.append(length)
rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb)
if extents:
LOG.debug("RBD has %s extents", sum(extents))
return True
return False
    def _diff_restore_allowed(self, base_name: str, backup: 'objects.Backup',
                              volume: 'objects.Volume',
                              volume_file: linuxrbd.RBDVolumeIOWrapper,
                              rados_client: 'rados.Rados'
                              ) -> Tuple[bool, Optional[str]]:
        """Determine if differential restore is possible and restore point.

        Determine whether a differential restore is possible/allowed,
        and find out the restore point if backup base is diff-format.

        In order for a differential restore to be performed we need:
        * destination volume must be RBD
        * destination volume must have zero extents
        * backup base image must exist
        * backup must have a restore point
        * target volume is different from source volume of backup

        :param base_name: candidate base image name (diff format).
        :param backup: backup being restored.
        :param volume: destination volume object.
        :param volume_file: open I/O handle on the destination volume.
        :param rados_client: connected RADOS client for the backup pool.
        :returns: (diff_allowed, restore_point). restore_point may be set
            even when diff_allowed is False so the caller can still do a
            full copy from that snapshot.
        """
        # NOTE(dosaboy): base_name here must be diff format.
        rbd_exists, base_name = self._rbd_image_exists(base_name,
                                                       backup.volume_id,
                                                       rados_client)

        if not rbd_exists:
            return False, None

        # Get the restore point. If no restore point is found, we assume
        # that the backup was not performed using diff/incremental methods
        # so we enforce full copy.
        restore_point = self._get_restore_point(base_name, backup.id)

        if restore_point:
            if self._file_is_rbd(volume_file):
                LOG.debug("Volume file is RBD.")
                # If the volume we are restoring to is the volume the backup
                # was made from, force a full restore since a diff will not
                # work in this case.
                if volume.id == backup.volume_id:
                    LOG.debug("Destination volume is same as backup source "
                              "volume %s - forcing full copy.", volume.id)
                    return False, restore_point

                # If the destination volume has extents we cannot allow a diff
                # restore.
                if self._rbd_has_extents(volume_file.rbd_image):
                    # We return the restore point so that a full copy is done
                    # from snapshot.
                    LOG.debug("Destination has extents - forcing full copy")
                    return False, restore_point

                return True, restore_point
            else:
                LOG.debug("Volume file is NOT RBD.")
        else:
            LOG.info("No restore point found for backup='%(backup)s' of "
                     "volume %(volume)s although base image is found - "
                     "forcing full copy.",
                     {'backup': backup.id,
                      'volume': backup.volume_id})
        return False, restore_point
    def _restore_volume(self,
                        backup: 'objects.Backup',
                        volume: 'objects.Volume',
                        volume_file: linuxrbd.RBDVolumeIOWrapper,
                        volume_is_new: bool) -> None:
        """Restore volume from backup using diff transfer if possible.

        Attempts a differential restore and reverts to full copy if diff
        fails.

        :param backup: backup to restore from.
        :param volume: destination volume object.
        :param volume_file: open I/O handle on the destination volume.
        :param volume_is_new: True if the destination volume is new.
        """
        length = int(volume.size) * units.Gi

        # Backups with service_metadata were written with the per-backup base
        # name; older ones used the legacy name format.
        if backup.service_metadata:
            base_name = self._get_backup_base_name(backup.volume_id, backup)
        else:
            base_name = self._get_backup_base_name(backup.volume_id)

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(
                self, backup.container)) as client:
            diff_allowed, restore_point = \
                self._diff_restore_allowed(base_name, backup, volume,
                                           volume_file, client)

        do_full_restore = True
        if diff_allowed:
            # Attempt diff
            try:
                LOG.debug("Attempting differential restore.")
                self._diff_restore_rbd(backup, volume_file, volume.name,
                                       restore_point, length)
                do_full_restore = False
            except exception.BackupRBDOperationFailed:
                LOG.debug("Forcing full restore to volume %s.",
                          volume.id)

        if do_full_restore:
            # Otherwise full copy
            LOG.debug("Running full restore.")
            self._full_restore(backup, volume_file, volume.name,
                               length, volume_is_new, src_snap=restore_point)
    def _restore_metadata(self,
                          backup: 'objects.Backup',
                          volume_id: str) -> None:
        """Restore volume metadata from backup.

        If this backup has associated metadata, save it to the restore target
        otherwise do nothing.

        :param backup: backup whose metadata object is looked up.
        :param volume_id: volume that receives the restored metadata.
        :raises exception.BackupOperationError: if the stored metadata uses
            an unsupported version.
        """
        try:
            with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self)) as client:
                meta_bak = VolumeMetadataBackup(client, backup.id)
                meta = meta_bak.get()
                if meta is not None:
                    self.put_metadata(volume_id, meta)
                else:
                    LOG.debug("Volume %s has no backed up metadata.",
                              backup.volume_id)
        except exception.BackupMetadataUnsupportedVersion:
            msg = _("Metadata restore failed due to incompatible version")
            raise exception.BackupOperationError(msg)
    def restore(self,
                backup: 'objects.Backup',
                volume_id: str,
                volume_file: linuxrbd.RBDVolumeIOWrapper,
                volume_is_new: bool) -> None:
        """Restore volume from backup in Ceph object store.

        If volume metadata is available this will also be restored.

        :param backup: backup to restore.
        :param volume_id: destination volume id.
        :param volume_file: open I/O handle on the destination volume.
        :param volume_is_new: True if the destination volume is new.
        """
        target_volume = self.db.volume_get(self.context, volume_id)
        LOG.debug('Starting restore from Ceph backup=%(src)s to '
                  'volume=%(dest)s new=%(new)s',
                  {'src': backup.id, 'dest': target_volume.name,
                   'new': volume_is_new})

        try:
            self._restore_volume(backup, target_volume, volume_file,
                                 volume_is_new)

            # Be tolerant of IO implementations that do not support fileno()
            try:
                fileno = volume_file.fileno()
            except IOError:
                LOG.debug("Restore target I/O object does not support "
                          "fileno() - skipping call to fsync().")
            else:
                # Flush the restored data to disk before declaring success.
                os.fsync(fileno)

            self._restore_metadata(backup, volume_id)

            LOG.debug('Restore to volume %s finished successfully.',
                      volume_id)
        except exception.BackupOperationError as e:
            LOG.error('Restore to volume %(volume)s finished with error - '
                      '%(error)s.', {'error': e, 'volume': volume_id})
            raise
    def delete_backup(self, backup: 'objects.Backup') -> None:
        """Delete the given backup from Ceph object store.

        Missing images or a missing pool are logged as warnings and the
        metadata cleanup still proceeds where possible (best effort).

        :param backup: backup to delete.
        """
        LOG.debug('Delete started for backup=%s', backup.id)

        delete_failed = False
        has_pool = True
        try:
            self._try_delete_base_image(backup)
        except self.rbd.ImageNotFound:
            LOG.warning(
                "RBD image for backup %(backup)s of volume %(volume)s "
                "not found. Deleting backup metadata.",
                {'backup': backup.id, 'volume': backup.volume_id})
            delete_failed = True
        except self.rados.ObjectNotFound:
            LOG.warning("The pool %(pool)s doesn't exist.",
                        {'pool': backup.container})
            delete_failed = True
            has_pool = False

        if has_pool:
            # Remove the metadata backup object even if the image was gone.
            with eventlet.tpool.Proxy(rbd_driver.RADOSClient(
                    self, backup.container)) as client:
                VolumeMetadataBackup(client, backup.id).remove_if_exists()

        if delete_failed:
            LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' "
                     "finished with warning.",
                     {'backup': backup.id, 'volume': backup.volume_id})
        else:
            LOG.debug("Delete of backup '%(backup)s' for volume "
                      "'%(volume)s' finished.",
                      {'backup': backup.id, 'volume': backup.volume_id})
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/gcs.py 0000664 0000000 0000000 00000036136 15131732575 0023716 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 Kevin Fox
# Copyright (C) 2015 Tom Barron
# Copyright (C) 2016 Vedams Inc.
# Copyright (C) 2016 Google Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service using Google Cloud Storage(GCS)
Google Cloud Storage json apis are used for backup operations.
Authentication and authorization are based on OAuth2.0.
Server-centric flow is used for authentication.
"""
import base64
import functools
import hashlib
import io
import os

try:
    from google.auth import exceptions as gexceptions
    from google.oauth2 import service_account
    import google_auth_httplib2
except ImportError:
    service_account = google_auth_httplib2 = gexceptions = None

from googleapiclient import discovery
from googleapiclient import errors
from googleapiclient import http
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils

from cinder.backup import chunkeddriver
from cinder import exception
from cinder.i18n import _
LOG = logging.getLogger(__name__)
# Configuration options for the GCS backup driver; registered below and
# surfaced to the backup manager via GoogleBackupDriver.get_driver_options().
gcsbackup_service_opts = [
    cfg.StrOpt('backup_gcs_bucket',
               help='The GCS bucket to use.'),
    cfg.IntOpt('backup_gcs_object_size',
               default=52428800,
               help='The size in bytes of GCS backup objects.'),
    cfg.IntOpt('backup_gcs_block_size',
               default=32768,
               help='The size in bytes that changes are tracked '
                    'for incremental backups. backup_gcs_object_size '
                    'has to be multiple of backup_gcs_block_size.'),
    cfg.IntOpt('backup_gcs_reader_chunk_size',
               default=2097152,
               help='GCS object will be downloaded in chunks of bytes.'),
    cfg.IntOpt('backup_gcs_writer_chunk_size',
               default=2097152,
               help='GCS object will be uploaded in chunks of bytes. '
                    'Pass in a value of -1 if the file '
                    'is to be uploaded as a single chunk.'),
    cfg.IntOpt('backup_gcs_num_retries',
               default=3,
               help='Number of times to retry.'),
    cfg.ListOpt('backup_gcs_retry_error_codes',
                default=['429'],
                help='List of GCS error codes.'),
    cfg.StrOpt('backup_gcs_bucket_location',
               default='US',
               help='Location of GCS bucket.'),
    cfg.StrOpt('backup_gcs_storage_class',
               default='NEARLINE',
               help='Storage class of GCS bucket.'),
    cfg.StrOpt('backup_gcs_credential_file',
               help='Absolute path of GCS service account credential file.'),
    cfg.StrOpt('backup_gcs_project_id',
               help='Owner project id for GCS bucket.'),
    cfg.StrOpt('backup_gcs_user_agent',
               default='gcscinder',
               help='Http user-agent string for gcs api.'),
    cfg.BoolOpt('backup_gcs_enable_progress_timer',
                default=True,
                help='Enable or Disable the timer to send the periodic '
                     'progress notifications to Ceilometer when backing '
                     'up the volume to the GCS backend storage. The '
                     'default value is True to enable the timer.'),
    cfg.URIOpt('backup_gcs_proxy_url',
               help='URL for http proxy access.',
               secret=True),
]
CONF = cfg.CONF
CONF.register_opts(gcsbackup_service_opts)
# Tuple of google.auth exception classes caught by gcs_logger; stays None
# until GoogleBackupDriver.__init__ confirms google.auth imported cleanly.
OAUTH_EXCEPTIONS = None
# Google Cloud Storage(GCS) backup driver
class GCSConnectionFailure(exception.BackupDriverException):
    """Connection-level GCS failure; catch-all raised by gcs_logger."""
    message = _("Google Cloud Storage connection failure: %(reason)s")
class GCSApiFailure(exception.BackupDriverException):
    """Raised by gcs_logger when a googleapiclient call fails."""
    message = _("Google Cloud Storage api failure: %(reason)s")
class GCSOAuth2Failure(exception.BackupDriverException):
    """Raised by gcs_logger when OAuth2 credential handling fails."""
    message = _("Google Cloud Storage oauth2 failure: %(reason)s")
def gcs_logger(func):
    """Decorator translating GCS client errors into backup exceptions.

    googleapiclient errors become GCSApiFailure, OAuth2 errors (the
    module-level OAUTH_EXCEPTIONS tuple, populated by
    GoogleBackupDriver.__init__) become GCSOAuth2Failure, and anything
    else becomes GCSConnectionFailure.  The original exception is kept
    as the cause for easier debugging.
    """
    # functools.wraps keeps the wrapped method's name/docstring, matching
    # the style of _wrap_exception in the S3 backup driver.
    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except errors.Error as err:
            raise GCSApiFailure(reason=err) from err
        except OAUTH_EXCEPTIONS as err:
            raise GCSOAuth2Failure(reason=err) from err
        except Exception as err:
            raise GCSConnectionFailure(reason=err) from err
    return func_wrapper
@interface.backupdriver
class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver):
    """Provides backup, restore and delete of backup objects within GCS."""
    def __init__(self, context):
        # OAUTH_EXCEPTIONS is module level so gcs_logger can catch them;
        # it can only be populated once google.auth imported successfully.
        global OAUTH_EXCEPTIONS
        backup_bucket = CONF.backup_gcs_bucket
        self.gcs_project_id = CONF.backup_gcs_project_id
        chunk_size_bytes = CONF.backup_gcs_object_size
        sha_block_size_bytes = CONF.backup_gcs_block_size
        enable_progress_timer = CONF.backup_gcs_enable_progress_timer
        super().__init__(
            context,
            chunk_size_bytes,
            sha_block_size_bytes,
            backup_bucket,
            enable_progress_timer,
        )
        self.reader_chunk_size = CONF.backup_gcs_reader_chunk_size
        self.writer_chunk_size = CONF.backup_gcs_writer_chunk_size
        self.bucket_location = CONF.backup_gcs_bucket_location
        self.storage_class = CONF.backup_gcs_storage_class
        self.num_retries = CONF.backup_gcs_num_retries
        # Set or overwrite environmental proxy variables for httplib2 since
        # it's the only mechanism supported when using googleapiclient with
        # google-auth
        if CONF.backup_gcs_proxy_url:
            os.environ['http_proxy'] = CONF.backup_gcs_proxy_url
        backup_credential = CONF.backup_gcs_credential_file
        # service_account is imported if all required libraries are available
        if service_account:
            creds = service_account.Credentials.from_service_account_file(
                backup_credential)
            OAUTH_EXCEPTIONS = (gexceptions.RefreshError,
                                gexceptions.DefaultCredentialsError)
        else:
            # NOTE(tkajinam): google-api-python-client is now in requirements
            # and google-auth-httplib2 is its dependency. So this error
            # should not be raised now. But it's kept now in case the client
            # library is moved to extra dependencies
            msg = _('google-api-python-client not found')
            raise exception.BackupDriverException(reason=msg)
        self.conn = discovery.build('storage',
                                    'v1',
                                    # Avoid log error on oauth2client >= 4.0.0
                                    cache_discovery=False,
                                    credentials=creds)
        # A writer chunk size of -1 selects single-shot (non-resumable)
        # uploads; see the backup_gcs_writer_chunk_size option help.
        self.resumable = self.writer_chunk_size != -1
    @staticmethod
    def get_driver_options():
        """Return the config options registered by this driver."""
        return gcsbackup_service_opts
    def check_for_setup_error(self):
        """Raise InvalidConfigurationValue if a required option is unset."""
        required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file',
                            'backup_gcs_project_id')
        for opt in required_options:
            val = getattr(CONF, opt, None)
            if not val:
                raise exception.InvalidConfigurationValue(option=opt,
                                                          value=val)
    @gcs_logger
    def put_container(self, bucket):
        """Create the bucket if not exists."""
        # buckets().list() filters by prefix only, so other buckets sharing
        # the prefix can come back; check for an exact name match below.
        buckets = self.conn.buckets().list(
            project=self.gcs_project_id,
            prefix=bucket,
            fields="items(name)").execute(
            num_retries=self.num_retries).get('items', [])
        if not any(b.get('name') == bucket for b in buckets):
            self.conn.buckets().insert(
                project=self.gcs_project_id,
                body={'name': bucket,
                      'location': self.bucket_location,
                      'storageClass': self.storage_class}).execute(
                num_retries=self.num_retries)
    @gcs_logger
    def get_container_entries(self, bucket, prefix):
        """Get bucket entry names."""
        obj_list_dict = self.conn.objects().list(
            bucket=bucket,
            fields="items(name)",
            prefix=prefix).execute(num_retries=self.num_retries).get(
            'items', [])
        return [obj_dict.get('name') for obj_dict in obj_list_dict]
    def get_object_writer(self, bucket, object_name, extra_metadata=None):
        """Return a writer object.
        Returns a writer object that stores a chunk of volume data in a
        GCS object store.
        """
        return GoogleObjectWriter(bucket, object_name, self.conn,
                                  self.writer_chunk_size,
                                  self.num_retries,
                                  self.resumable)
    def get_object_reader(self, bucket, object_name, extra_metadata=None):
        """Return reader object.
        Returns a reader object that retrieves a chunk of backed-up volume data
        from a GCS object store.
        """
        return GoogleObjectReader(bucket, object_name, self.conn,
                                  self.reader_chunk_size,
                                  self.num_retries)
    @gcs_logger
    def delete_object(self, bucket, object_name):
        """Deletes a backup object from a GCS object store."""
        self.conn.objects().delete(
            bucket=bucket,
            object=object_name).execute(num_retries=self.num_retries)
    def _generate_object_name_prefix(self, backup):
        """Generates a GCS backup object name prefix.
        prefix = volume_volid/timestamp/az_saz_backup_bakid
        volid is volume id.
        timestamp is time in UTC with format of YearMonthDateHourMinuteSecond.
        saz is storage_availability_zone.
        bakid is backup id for volid.
        """
        az = 'az_%s' % self.az
        backup_name = '%s_backup_%s' % (az, backup.id)
        volume = 'volume_%s' % (backup.volume_id)
        timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S")
        prefix = volume + '/' + timestamp + '/' + backup_name
        LOG.debug('generate_object_name_prefix: %s', prefix)
        return prefix
    def update_container_name(self, backup, bucket):
        """Use the bucket name as provided - don't update."""
        return
    def get_extra_metadata(self, backup, volume):
        """GCS driver does not use any extra metadata."""
        return
class GoogleObjectWriter(object):
    """Buffers backup data in memory and uploads it to GCS on close()."""

    def __init__(self, bucket, object_name, conn, writer_chunk_size,
                 num_retries, resumable):
        self.bucket = bucket
        self.object_name = object_name
        self.conn = conn
        # In-memory buffer accumulating all chunks until close().
        self.data = bytearray()
        self.chunk_size = writer_chunk_size
        self.num_retries = num_retries
        self.resumable = resumable

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def write(self, data):
        """Append a chunk of data to the in-memory buffer."""
        self.data += data

    @gcs_logger
    def close(self):
        """Upload the buffered data to GCS and verify its integrity.

        Returns the base64-encoded MD5 digest (bytes) of the uploaded
        data.  Raises InvalidBackup when the md5Hash reported by GCS does
        not match the locally computed digest.
        """
        media = http.MediaIoBaseUpload(io.BytesIO(self.data),
                                       'application/octet-stream',
                                       chunksize=self.chunk_size,
                                       resumable=self.resumable)
        resp = self.conn.objects().insert(
            bucket=self.bucket,
            name=self.object_name,
            body={},
            media_body=media).execute(num_retries=self.num_retries)
        # GCS reports 'md5Hash' as the base64 encoding of the binary MD5
        # digest.  hashlib's digest() already returns bytes, so it must not
        # be encode()d again (bytes has no encode method and the original
        # code raised AttributeError here); only the str etag from the JSON
        # response needs converting to bytes for the comparison.
        etag = resp['md5Hash'].encode('utf-8')
        md5 = base64.b64encode(
            hashlib.md5(self.data, usedforsecurity=False).digest())
        if etag != md5:
            err = _('MD5 of object: %(object_name)s before: '
                    '%(md5)s and after: %(etag)s is not same.') % {
                'object_name': self.object_name,
                'md5': md5, 'etag': etag, }
            raise exception.InvalidBackup(reason=err)
        else:
            LOG.debug('MD5 before: %(md5)s and after: %(etag)s '
                      'writing object: %(object_name)s in GCS.',
                      {'etag': etag, 'md5': md5,
                       'object_name': self.object_name, })
        return md5
class GoogleObjectReader(object):
    """Reads a backed-up object from GCS in configurable-size chunks."""

    def __init__(self, bucket, object_name, conn, reader_chunk_size,
                 num_retries):
        self.bucket = bucket
        self.object_name = object_name
        self.conn = conn
        self.chunk_size = reader_chunk_size
        self.num_retries = num_retries

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    @gcs_logger
    def read(self):
        """Download the whole object and return its contents as bytes."""
        request = self.conn.objects().get_media(
            bucket=self.bucket,
            object=self.object_name)
        buf = io.BytesIO()
        downloader = GoogleMediaIoBaseDownload(
            buf, request, chunksize=self.chunk_size)
        finished = False
        while not finished:
            _status, finished = downloader.next_chunk(
                num_retries=self.num_retries)
        LOG.debug('GCS Object download Complete.')
        return buf.getvalue()
class GoogleMediaIoBaseDownload(http.MediaIoBaseDownload):
    """MediaIoBaseDownload variant with configurable retryable error codes.

    Overrides next_chunk so that, besides 5xx responses, the HTTP status
    codes listed in backup_gcs_retry_error_codes (e.g. 429) are also
    retried, with exponential backoff and random jitter.
    """
    @http.util.positional(1)
    def next_chunk(self, num_retries=None):
        error_codes = CONF.backup_gcs_retry_error_codes
        # Request only the next chunk: _progress bytes are already written.
        headers = {'range': 'bytes=%d-%d' %
                   (self._progress, self._progress + self._chunksize)}
        gcs_http = self._request.http
        for retry_num in range(num_retries + 1):
            if retry_num > 0:
                # Exponential backoff with random jitter before each retry.
                self._sleep(self._rand() * 2 ** retry_num)
            resp, content = gcs_http.request(self._uri, headers=headers)
            # Stop retrying once the response is neither a server error
            # nor one of the configured retryable codes.
            if resp.status < 500 and (str(resp.status)
                                      not in error_codes):
                break
        if resp.status in [200, 206]:
            # Follow a changed content-location for subsequent chunks.
            if 'content-location' in resp and (
                    resp['content-location'] != self._uri):
                self._uri = resp['content-location']
            self._progress += len(content)
            self._fd.write(content)
            if 'content-range' in resp:
                # 'content-range: bytes a-b/total' -> total after the '/'.
                content_range = resp['content-range']
                length = content_range.rsplit('/', 1)[1]
                self._total_size = int(length)
            elif 'content-length' in resp:
                self._total_size = int(resp['content-length'])
            if self._progress == self._total_size:
                self._done = True
            return (http.MediaDownloadProgress(self._progress,
                                               self._total_size), self._done)
        else:
            raise http.HttpError(resp, content, uri=self._uri)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/glusterfs.py 0000664 0000000 0000000 00000007247 15131732575 0025161 0 ustar 00root root 0000000 0000000 # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses GlusterFS as the backend."""
import os
import stat
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from cinder.backup.drivers import posix
from cinder import exception
from cinder import interface
from cinder import utils
# Configuration options for the GlusterFS backup driver; registered below.
glusterfsbackup_service_opts = [
    cfg.StrOpt('glusterfs_backup_mount_point',
               default='$state_path/backup_mount',
               help='Base dir containing mount point for gluster share.'),
    cfg.StrOpt('glusterfs_backup_share',
               # The placeholder tokens had been lost from this help text
               # ("GlusterFS share in : format."); restore them so the
               # rendered option help describes the expected format.
               help='GlusterFS share in '
                    '<hostname|ipv4addr|ipv6addr>:<gluster_vol_name> format. '
                    'Eg: 1.2.3.4:backup_vol'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glusterfsbackup_service_opts)
@interface.backupdriver
class GlusterfsBackupDriver(posix.PosixBackupDriver):
    """Backup driver that stores backups on a mounted GlusterFS share."""

    def __init__(self, context):
        self.backup_mount_point_base = CONF.glusterfs_backup_mount_point
        self.backup_share = CONF.glusterfs_backup_share
        self._root_helper = utils.get_root_helper()
        self._execute = putils.execute
        super().__init__(context,
                         backup_path=self._init_backup_repo_path())

    @staticmethod
    def get_driver_options():
        return glusterfsbackup_service_opts

    def check_for_setup_error(self):
        """Raises error if any required configuration flag is missing."""
        versionutils.report_deprecated_feature(
            LOG,
            "The Cinder GlusterFS Backup Driver is deprecated and will be "
            "removed in the 2025.1 release.")
        for flag in ('glusterfs_backup_share',):
            value = getattr(CONF, flag, None)
            if value:
                continue
            raise exception.InvalidConfigurationValue(option=flag,
                                                      value=value)

    def _init_backup_repo_path(self):
        """Mount the gluster share and return its local mount point."""
        client = remotefs_brick.RemoteFsClient(
            'glusterfs',
            self._root_helper,
            glusterfs_mount_point_base=self.backup_mount_point_base)
        client.mount(self.backup_share)
        # Ensure we can write to this share
        path = client.get_mount_point(self.backup_share)
        effective_gid = os.getegid()
        current_gid = utils.get_file_gid(path)
        current_mode = utils.get_file_mode(path)
        if effective_gid != current_gid:
            self._execute('chgrp', effective_gid, path,
                          root_helper=self._root_helper, run_as_root=True)
        if not (current_mode & stat.S_IWGRP):
            self._execute('chmod', 'g+w', path,
                          root_helper=self._root_helper, run_as_root=True)
        return path
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/nfs.py 0000664 0000000 0000000 00000010356 15131732575 0023724 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015 Tom Barron
# Copyright (C) 2015 Kevin Fox
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses NFS storage as the backend."""
import os
import stat
from os_brick import exception as brick_exception
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from cinder.backup.drivers import posix
from cinder import exception
from cinder import interface
from cinder import utils
LOG = logging.getLogger(__name__)
# Configuration options for the NFS backup driver; registered below and
# consumed by NFSBackupDriver.__init__ / _init_backup_repo_path.
nfsbackup_service_opts = [
    cfg.StrOpt('backup_mount_point_base',
               default='$state_path/backup_mount',
               help='Base dir containing mount point for NFS share.'),
    cfg.StrOpt('backup_share',
               help='NFS share in hostname:path, ipv4addr:path, '
                    'or "[ipv6addr]:path" format.'),
    cfg.StrOpt('backup_mount_options',
               help=('Mount options passed to the NFS client. See NFS '
                     'man page for details.')),
    cfg.IntOpt('backup_mount_attempts',
               min=1,
               default=3,
               help='The number of attempts to mount NFS shares before '
                    'raising an error.'),
]
CONF = cfg.CONF
CONF.register_opts(nfsbackup_service_opts)
@interface.backupdriver
class NFSBackupDriver(posix.PosixBackupDriver):
    """Provides backup, restore and delete using NFS supplied repository."""

    def __init__(self, context):
        self.backup_mount_point_base = CONF.backup_mount_point_base
        self.backup_share = CONF.backup_share
        self.mount_options = CONF.backup_mount_options
        self._execute = putils.execute
        self._root_helper = utils.get_root_helper()
        backup_path = self._init_backup_repo_path()
        LOG.debug("Using NFS backup repository: %s", backup_path)
        super().__init__(context, backup_path=backup_path)

    @staticmethod
    def get_driver_options():
        # Added for consistency with the sibling backup drivers (posix,
        # glusterfs, gcs, s3), which all expose their config options to
        # the backup manager through this hook.
        return nfsbackup_service_opts

    def check_for_setup_error(self):
        """Raises error if any required configuration flag is missing."""
        required_flags = ['backup_share']
        for flag in required_flags:
            val = getattr(CONF, flag, None)
            if not val:
                raise exception.InvalidConfigurationValue(option=flag,
                                                          value=val)

    def _init_backup_repo_path(self):
        """Mount the configured NFS share and make it group-writable.

        Returns the local mount point path, or None when backup_share is
        not configured.
        """
        if self.backup_share is None:
            LOG.info("_init_backup_repo_path: "
                     "backup_share is not set in configuration")
            return
        remotefsclient = remotefs_brick.RemoteFsClient(
            'nfs',
            self._root_helper,
            nfs_mount_point_base=self.backup_mount_point_base,
            nfs_mount_options=self.mount_options)

        # Transient mount failures are retried backup_mount_attempts times.
        @utils.retry(
            (brick_exception.BrickException, putils.ProcessExecutionError),
            retries=CONF.backup_mount_attempts)
        def mount():
            remotefsclient.mount(self.backup_share)
        mount()
        # Ensure we can write to this share
        mount_path = remotefsclient.get_mount_point(self.backup_share)
        group_id = os.getegid()
        current_group_id = utils.get_file_gid(mount_path)
        current_mode = utils.get_file_mode(mount_path)
        if group_id != current_group_id:
            cmd = ['chgrp', '-R', group_id, mount_path]
            self._execute(*cmd, root_helper=self._root_helper,
                          run_as_root=True)
        if not (current_mode & stat.S_IWGRP):
            cmd = ['chmod', '-R', 'g+w', mount_path]
            self._execute(*cmd, root_helper=self._root_helper,
                          run_as_root=True)
        return mount_path
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/posix.py 0000664 0000000 0000000 00000013466 15131732575 0024305 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015 Tom Barron
# Copyright (C) 2015 Kevin Fox
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses a posix filesystem as the
backend."""
import errno
import os
import os.path
import stat
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cinder.backup import chunkeddriver
from cinder import exception
from cinder import interface
LOG = logging.getLogger(__name__)
# Default block size (bytes) for incremental-backup change tracking.
SHA_SIZE = 32768
# Multiple of SHA_SIZE, close to a characteristic OS max file system size.
BACKUP_FILE_SIZE = 61035 * 32768
posixbackup_service_opts = [
    cfg.IntOpt('backup_file_size',
               default=BACKUP_FILE_SIZE,
               help='The maximum size in bytes of the files used to hold '
                    'backups. If the volume being backed up exceeds this '
                    'size, then it will be backed up into multiple files. '
                    'backup_file_size also determines the buffer size '
                    'used to build backup files, so should be scaled '
                    'according to available RAM and number of workers. '
                    'backup_file_size must be a multiple of '
                    'backup_sha_block_size_bytes.'),
    cfg.IntOpt('backup_sha_block_size_bytes',
               default=SHA_SIZE,
               help='The size in bytes that changes are tracked '
                    'for incremental backups. backup_file_size has '
                    'to be multiple of backup_sha_block_size_bytes.'),
    cfg.BoolOpt('backup_enable_progress_timer',
                default=True,
                help='Enable or Disable the timer to send the periodic '
                     'progress notifications to Ceilometer when backing '
                     'up the volume to the backend storage. The '
                     'default value is True to enable the timer.'),
    cfg.StrOpt('backup_posix_path',
               default='$state_path/backup',
               help='Path specifying where to store backups.'),
    cfg.StrOpt('backup_container',
               help='Custom directory to use for backups.'),
]
CONF = cfg.CONF
CONF.register_opts(posixbackup_service_opts)
@interface.backupdriver
class PosixBackupDriver(chunkeddriver.ChunkedBackupDriver):
    """Provides backup, restore and delete using a Posix file system."""

    def __init__(self, context, backup_path=None):
        super().__init__(
            context,
            CONF.backup_file_size,
            CONF.backup_sha_block_size_bytes,
            CONF.backup_container,
            CONF.backup_enable_progress_timer,
        )
        # Explicit backup_path wins; otherwise fall back to configuration.
        self.backup_path = backup_path or CONF.backup_posix_path
        if not self.backup_path:
            raise exception.ConfigNotFound(path='backup_path')
        LOG.debug("Using backup repository: %s", self.backup_path)

    @staticmethod
    def get_driver_options():
        return posixbackup_service_opts

    def update_container_name(self, backup, container):
        """Derive a container dir from the backup id unless one was given."""
        if container is not None:
            return container
        backup_id = backup['id']
        return os.path.join(backup_id[0:2], backup_id[2:4], backup_id)

    def put_container(self, container):
        """Create the container directory with rwx for user and group."""
        path = os.path.join(self.backup_path, container)
        if os.path.exists(path):
            return
        os.makedirs(path)
        mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
                stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
        os.chmod(path, mode)

    def get_container_entries(self, container, prefix):
        """List entries in the container whose names start with prefix."""
        path = os.path.join(self.backup_path, container)
        return [entry for entry in os.listdir(path)
                if entry.startswith(prefix)]

    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Open a backup object file for writing (rw for user and group)."""
        path = os.path.join(self.backup_path, container, object_name)
        writer = open(path, 'wb')
        os.chmod(path, (stat.S_IRUSR | stat.S_IWUSR |
                        stat.S_IRGRP | stat.S_IWGRP))
        return writer

    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Open a backup object file for reading."""
        return open(os.path.join(self.backup_path, container, object_name),
                    'rb')

    def delete_object(self, container, object_name):
        # TODO(tbarron): clean up the container path if it is empty
        path = os.path.join(self.backup_path, container, object_name)
        try:
            os.remove(path)
        except FileNotFoundError:
            # Deletion is idempotent: a missing object is not an error.
            pass

    def _generate_object_name_prefix(self, backup):
        """Build 'volume_<volid>_<UTC timestamp>_backup_<backup id>'."""
        timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S")
        prefix = 'volume_%s_%s_backup_%s' % (backup.volume_id, timestamp,
                                             backup.id)
        LOG.debug('_generate_object_name_prefix: %s', prefix)
        return prefix

    def get_extra_metadata(self, backup, volume):
        """Posix driver does not use extra metadata."""
        return None
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/s3.py 0000664 0000000 0000000 00000037436 15131732575 0023473 0 ustar 00root root 0000000 0000000 # Copyright (C) 2020 leafcloud b.v.
# Copyright (C) 2020 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses S3 as the backend
**Related Flags**
:backup_s3_endpoint_url: The url where the S3 server is listening.
(default: None)
:backup_s3_store_bucket: The S3 bucket to be used to store
the Cinder backup data. (default: volumebackups)
:backup_s3_store_access_key: The S3 query token access key. (default: None)
:backup_s3_store_secret_key: The S3 query token secret key. (default: None)
:backup_s3_sse_customer_key: The SSECustomerKey.
backup_s3_sse_customer_algorithm must be set at
the same time to enable SSE. (default: None)
:backup_s3_sse_customer_algorithm: The SSECustomerAlgorithm.
backup_s3_sse_customer_key must be set at
the same time to enable SSE. (default: None)
:backup_s3_object_size: The size in bytes of S3 backup objects.
(default: 52428800)
:backup_s3_block_size: The size in bytes that changes are tracked
for incremental backups. backup_s3_object_size
has to be multiple of backup_s3_block_size.
(default: 32768).
:backup_s3_md5_validation: Enable or Disable md5 validation in the s3 backend.
(default: True)
:backup_s3_http_proxy: Address or host for the http proxy server.
(default: '')
:backup_s3_https_proxy: Address or host for the https proxy server.
(default: '')
:backup_s3_timeout: The time in seconds till a timeout exception is thrown.
(default: 60)
:backup_s3_max_pool_connections: The maximum number of connections
to keep in a connection pool. (default: 10)
:backup_s3_retry_max_attempts: An integer representing the maximum number of
retry attempts that will be made on
a single request. (default: 4)
:backup_s3_retry_mode: A string representing the type of retry mode.
e.g: legacy, standard, adaptive. (default: legacy)
:backup_s3_verify_ssl: Enable or Disable ssl verify.
(default: True)
:backup_s3_ca_cert_file: A filename of the CA cert bundle to use.
(default: None)
:backup_s3_enable_progress_timer: Enable or Disable the timer to send the
periodic progress notifications to
Ceilometer when backing up the volume to the
S3 backend storage. (default: True)
:backup_compression_algorithm: Compression algorithm to use for volume
backups.
"""
import base64
import functools
import hashlib
import io
import itertools as it
import socket
import boto3
from botocore.config import Config
from botocore import exceptions as boto_exc
from botocore.vendored.requests.packages.urllib3 import exceptions as \
urrlib_exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cinder.backup import chunkeddriver
from cinder import exception
from cinder.i18n import _
from cinder import interface
LOG = logging.getLogger(__name__)
# Configuration options for the S3 backup driver; registered below and
# surfaced via S3BackupDriver.get_driver_options().
s3backup_service_opts = [
    cfg.StrOpt('backup_s3_endpoint_url',
               help=_('The url where the S3 server is listening.')),
    cfg.StrOpt('backup_s3_store_access_key', secret=True,
               help=_('The S3 query token access key.')),
    cfg.StrOpt('backup_s3_store_secret_key', secret=True,
               help=_('The S3 query token secret key.')),
    cfg.StrOpt('backup_s3_store_bucket', default='volumebackups',
               help=_('The S3 bucket to be used '
                      'to store the Cinder backup data.')),
    cfg.IntOpt('backup_s3_object_size', default=52428800,
               help='The size in bytes of S3 backup objects'),
    cfg.IntOpt('backup_s3_block_size', default=32768,
               help='The size in bytes that changes are tracked '
                    'for incremental backups. backup_s3_object_size '
                    'has to be multiple of backup_s3_block_size.'),
    cfg.BoolOpt('backup_s3_enable_progress_timer', default=True,
                help='Enable or Disable the timer to send the periodic '
                     'progress notifications to Ceilometer when backing '
                     'up the volume to the S3 backend storage. The '
                     'default value is True to enable the timer.'),
    cfg.StrOpt('backup_s3_http_proxy', default='',
               help='Address or host for the http proxy server.'),
    cfg.StrOpt('backup_s3_https_proxy', default='',
               help='Address or host for the https proxy server.'),
    cfg.FloatOpt('backup_s3_timeout', default=60,
                 help='The time in seconds till '
                      'a timeout exception is thrown.'),
    cfg.IntOpt('backup_s3_max_pool_connections', default=10,
               help='The maximum number of connections '
                    'to keep in a connection pool.'),
    cfg.IntOpt('backup_s3_retry_max_attempts', default=4,
               help='An integer representing the maximum number of '
                    'retry attempts that will be made on a single request.'),
    cfg.StrOpt('backup_s3_retry_mode', default='legacy',
               help='A string representing the type of retry mode. '
                    'e.g: legacy, standard, adaptive'),
    cfg.BoolOpt('backup_s3_verify_ssl', default=True,
                help='Enable or Disable ssl verify.'),
    cfg.StrOpt('backup_s3_ca_cert_file', default=None,
               help='path/to/cert/bundle.pem '
                    '- A filename of the CA cert bundle to use.'),
    cfg.BoolOpt('backup_s3_md5_validation', default=True,
                help='Enable or Disable md5 validation in the s3 backend.'),
    cfg.StrOpt('backup_s3_sse_customer_key', default=None, secret=True,
               help='The SSECustomerKey. backup_s3_sse_customer_algorithm '
                    'must be set at the same time to enable SSE.'),
    cfg.StrOpt('backup_s3_sse_customer_algorithm', default=None,
               help='The SSECustomerAlgorithm. backup_s3_sse_customer_key '
                    'must be set at the same time to enable SSE.')
]
CONF = cfg.CONF
CONF.register_opts(s3backup_service_opts)
CONF.import_opt('backup_compression_algorithm', 'cinder.backup.chunkeddriver')
class S3ConnectionFailure(exception.BackupDriverException):
    """Connection-level S3 failure; catch-all raised by _wrap_exception."""
    message = _("S3 connection failure: %(reason)s")
class S3ClientError(exception.BackupDriverException):
    """Raised by _wrap_exception for botocore ClientError responses."""
    message = _("S3 client error: %(reason)s")
def _wrap_exception(func):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except boto_exc.ClientError as err:
raise S3ClientError(reason=err)
except Exception as err:
raise S3ConnectionFailure(reason=err)
return func_wrapper
@interface.backupdriver
class S3BackupDriver(chunkeddriver.ChunkedBackupDriver):
"""Provides backup, restore and delete of backup objects within S3."""
def __init__(self, context):
chunk_size_bytes = CONF.backup_s3_object_size
sha_block_size_bytes = CONF.backup_s3_block_size
backup_bucket = CONF.backup_s3_store_bucket
enable_progress_timer = CONF.backup_s3_enable_progress_timer
super().__init__(
context,
chunk_size_bytes,
sha_block_size_bytes,
backup_bucket,
enable_progress_timer,
)
config_args = dict(
connect_timeout=CONF.backup_s3_timeout,
read_timeout=CONF.backup_s3_timeout,
max_pool_connections=CONF.backup_s3_max_pool_connections,
retries={
'max_attempts': CONF.backup_s3_retry_max_attempts,
'mode': CONF.backup_s3_retry_mode})
if CONF.backup_s3_http_proxy:
config_args['proxies'] = {'http': CONF.backup_s3_http_proxy}
if CONF.backup_s3_https_proxy:
config_args.setdefault('proxies', {}).update(
{'https': CONF.backup_s3_https_proxy})
conn_args = {
'aws_access_key_id': CONF.backup_s3_store_access_key,
'aws_secret_access_key': CONF.backup_s3_store_secret_key,
'endpoint_url': CONF.backup_s3_endpoint_url,
'config': Config(**config_args)}
if CONF.backup_s3_verify_ssl:
conn_args['verify'] = CONF.backup_s3_ca_cert_file
if CONF.backup_s3_ca_cert_file is None:
LOG.warning('backup_s3_verify_ssl is True but no cert file '
'was provided')
else:
conn_args['verify'] = False
self.conn = boto3.client('s3', **conn_args)
@staticmethod
def get_driver_options():
backup_opts = [CONF._opts['backup_compression_algorithm']['opt']]
return s3backup_service_opts + backup_opts
@_wrap_exception
def put_container(self, bucket):
"""Create the bucket if not exists."""
try:
self.conn.head_bucket(Bucket=bucket)
except boto_exc.ClientError as e:
# NOTE: If it was a 404 error, then the bucket does not exist.
error_code = e.response['Error']['Code']
if error_code != '404':
raise
self.conn.create_bucket(Bucket=bucket)
@_wrap_exception
def get_container_entries(self, bucket, prefix):
"""Get bucket entry names."""
paginator = self.conn.get_paginator('list_objects_v2')
page_iterator = paginator.paginate(Bucket=bucket,
Prefix=prefix)
result = [obj_dict.get('Key') for obj_dict in it.chain.from_iterable(
page.get('Contents') for page in page_iterator)]
return result
def get_object_writer(self, bucket, object_name, extra_metadata=None):
"""Return a writer object.
Returns a writer object that stores a chunk of volume data in a
S3 object store.
"""
return S3ObjectWriter(bucket, object_name, self.conn)
def get_object_reader(self, bucket, object_name, extra_metadata=None):
"""Return reader object.
Returns a reader object that retrieves a chunk of backed-up volume data
from a S3 object store.
"""
return S3ObjectReader(bucket, object_name, self.conn)
@_wrap_exception
def delete_object(self, bucket, object_name):
    """Delete a single backup object from the S3 store."""
    self.conn.delete_object(Bucket=bucket, Key=object_name)
def _generate_object_name_prefix(self, backup):
    """Generate an S3 backup object name prefix.

    prefix = volume_<volid>/<timestamp>/az_<saz>_backup_<bakid>

    volid is the volume id, timestamp is the UTC time formatted as
    YearMonthDateHourMinuteSecond, saz is storage_availability_zone and
    bakid is the backup id for volid.
    """
    timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S")
    components = ('volume_%s' % backup.volume_id,
                  timestamp,
                  'az_%s_backup_%s' % (self.az, backup.id))
    prefix = '/'.join(components)
    LOG.debug('generate_object_name_prefix: %s', prefix)
    return prefix
def update_container_name(self, backup, container):
    """Keep the bucket name exactly as provided (no renaming)."""
    return None
def get_extra_metadata(self, backup, volume):
    """The S3 driver attaches no driver-specific metadata to backups."""
    return None
def check_for_setup_error(self):
    """Validate the S3 configuration and connectivity at service start.

    :raises InvalidConfigurationValue: if a required option is unset.
    """
    for option in ('backup_s3_endpoint_url',
                   'backup_s3_store_access_key',
                   'backup_s3_store_secret_key'):
        value = getattr(CONF, option, None)
        if not value:
            raise exception.InvalidConfigurationValue(option=option,
                                                      value=value)
    # SSE-C needs both the algorithm and the key; warn when exactly one
    # of the two is configured, since uploads then go unencrypted.
    if bool(CONF.backup_s3_sse_customer_algorithm) != bool(
            CONF.backup_s3_sse_customer_key):
        LOG.warning("Both the backup_s3_sse_customer_algorithm and "
                    "backup_s3_sse_customer_key options must be set "
                    "to enable SSE. SSE is disabled.")
    try:
        self.conn.list_buckets()
    except Exception:
        LOG.exception("Cannot list s3 buckets during backup "
                      "driver initialization.")
        raise
class S3ObjectWriter(object):
    """Buffers chunk data in memory and uploads it to S3 on close."""

    def __init__(self, bucket, object_name, conn):
        self.bucket = bucket
        self.object_name = object_name
        self.conn = conn
        self.data = bytearray()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def write(self, data):
        # Accumulate only; the actual upload happens in close().
        self.data += data

    @_wrap_exception
    def close(self):
        """Upload the buffered data and return its base64-encoded MD5."""
        body = io.BytesIO(self.data)
        digest = hashlib.md5(self.data, usedforsecurity=False).digest()
        contentmd5 = base64.b64encode(digest).decode('utf-8')
        put_args = {'Bucket': self.bucket,
                    'Body': body,
                    'Key': self.object_name,
                    'ContentLength': len(self.data)}
        if CONF.backup_s3_md5_validation:
            # Ask S3 to verify the payload against our local MD5.
            put_args['ContentMD5'] = contentmd5
        if (CONF.backup_s3_sse_customer_algorithm
                and CONF.backup_s3_sse_customer_key):
            put_args['SSECustomerAlgorithm'] = (
                CONF.backup_s3_sse_customer_algorithm)
            put_args['SSECustomerKey'] = CONF.backup_s3_sse_customer_key
        self.conn.put_object(**put_args)
        return contentmd5
class S3ObjectReader(object):
    """Reads a whole backup object back from S3, retrying stream errors."""

    def __init__(self, bucket, object_name, conn):
        self.bucket = bucket
        self.object_name = object_name
        self.conn = conn

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    @_wrap_exception
    def read(self):
        """Fetch and return the object body as bytes."""
        get_args = {'Bucket': self.bucket,
                    'Key': self.object_name}
        if (CONF.backup_s3_sse_customer_algorithm
                and CONF.backup_s3_sse_customer_key):
            get_args['SSECustomerAlgorithm'] = (
                CONF.backup_s3_sse_customer_algorithm)
            get_args['SSECustomerKey'] = CONF.backup_s3_sse_customer_key
        # NOTE: these retries account for errors that occur when streaming
        # down the data from s3 (i.e. socket errors and read timeouts that
        # occur after receiving an OK response from s3). Other retryable
        # exceptions such as throttling errors and 5xx errors are already
        # retried by botocore.
        last_exception = None
        for _attempt in range(CONF.backup_s3_retry_max_attempts):
            try:
                response = self.conn.get_object(**get_args)
                return response.get('Body').read()
            except (socket.timeout, socket.error,
                    urrlib_exc.ReadTimeoutError,
                    boto_exc.IncompleteReadError) as e:
                last_exception = e
        raise S3ClientError(reason=last_exception)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/drivers/swift.py 0000664 0000000 0000000 00000051673 15131732575 0024301 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 Kevin Fox
# Copyright (C) 2015 Tom Barron
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses Swift as the backend
**Related Flags**
:backup_swift_url: The URL of the Swift endpoint (default: None, use catalog).
:backup_swift_auth_url: The URL of the Keystone endpoint for authentication
(default: None, use catalog).
:swift_catalog_info: Info to match when looking for swift in the service
                     catalog.
:keystone_catalog_info: Info to match when looking for keystone in the service
catalog.
:backup_swift_object_size: The size in bytes of the Swift objects used
for volume backups (default: 52428800).
:backup_swift_retry_attempts: The number of retries to make for Swift
operations (default: 10).
:backup_swift_retry_backoff: The backoff time in seconds between retrying
failed Swift operations (default: 10).
:backup_compression_algorithm: Compression algorithm to use for volume
backups. Supported options are:
None (to disable), zlib and bz2 (default: zlib)
:backup_swift_ca_cert_file: The location of the CA certificate file to use
for swift client requests (default: None)
:backup_swift_auth_insecure: If true, bypass verification of server's
certificate for SSL connections (default: False)
"""
import hashlib
import io
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from swiftclient import client as swift
from swiftclient import exceptions as swift_exc
from cinder.backup import chunkeddriver
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import service_auth
from cinder.utils import retry
LOG = logging.getLogger(__name__)
# Configuration options for the Swift backup driver. They are registered on
# the global CONF object below and returned verbatim by
# SwiftBackupDriver.get_driver_options().
swiftbackup_service_opts = [
    cfg.URIOpt('backup_swift_url',
               help='The URL of the Swift endpoint'),
    cfg.URIOpt('backup_swift_auth_url',
               help='The URL of the Keystone endpoint'),
    cfg.StrOpt('swift_catalog_info',
               default='object-store:swift:publicURL',
               help='Info to match when looking for swift in the service '
                    'catalog. Format is: separated values of the form: '
                    ':: - '
                    'Only used if backup_swift_url is unset'),
    cfg.StrOpt('keystone_catalog_info',
               default='identity:Identity Service:publicURL',
               help='Info to match when looking for keystone in the service '
                    'catalog. Format is: separated values of the form: '
                    ':: - '
                    'Only used if backup_swift_auth_url is unset'),
    cfg.StrOpt('backup_swift_auth',
               default='per_user',
               choices=['per_user', 'single_user'],
               help='Swift authentication mechanism (per_user or '
                    'single_user).'),
    cfg.StrOpt('backup_swift_auth_version',
               default='1',
               help='Swift authentication version. Specify "1" for auth 1.0'
                    ', or "2" for auth 2.0 or "3" for auth 3.0'),
    cfg.StrOpt('backup_swift_tenant',
               help='Swift tenant/account name. Required when connecting'
                    ' to an auth 2.0 system'),
    cfg.StrOpt('backup_swift_user_domain',
               default=None,
               help='Swift user domain name. Required when connecting'
                    ' to an auth 3.0 system'),
    cfg.StrOpt('backup_swift_project_domain',
               default=None,
               help='Swift project domain name. Required when connecting'
                    ' to an auth 3.0 system'),
    cfg.StrOpt('backup_swift_project',
               default=None,
               help='Swift project/account name. Required when connecting'
                    ' to an auth 3.0 system'),
    cfg.StrOpt('backup_swift_user',
               help='Swift user name'),
    cfg.StrOpt('backup_swift_key',
               secret=True,
               help='Swift key for authentication'),
    cfg.StrOpt('backup_swift_container',
               default='volumebackups',
               help='The default Swift container to use'),
    cfg.StrOpt('backup_swift_create_storage_policy',
               default=None,
               help='The storage policy to use when creating the Swift '
                    'container. If the container already exists the '
                    'storage policy cannot be enforced'),
    cfg.IntOpt('backup_swift_object_size',
               default=52428800,
               help='The size in bytes of Swift backup objects'),
    cfg.IntOpt('backup_swift_block_size',
               default=32768,
               help='The size in bytes that changes are tracked '
                    'for incremental backups. backup_swift_object_size '
                    'has to be multiple of backup_swift_block_size.'),
    cfg.IntOpt('backup_swift_retry_attempts',
               default=3,
               help='The number of retries to make for Swift operations'),
    cfg.IntOpt('backup_swift_retry_backoff',
               default=2,
               help='The backoff time in seconds between Swift retries'),
    cfg.BoolOpt('backup_swift_enable_progress_timer',
                default=True,
                help='Enable or Disable the timer to send the periodic '
                     'progress notifications to Ceilometer when backing '
                     'up the volume to the Swift backend storage. The '
                     'default value is True to enable the timer.'),
    cfg.StrOpt('backup_swift_ca_cert_file',
               help='Location of the CA certificate file to use for swift '
                    'client requests.'),
    cfg.BoolOpt('backup_swift_auth_insecure',
                default=False,
                help='Bypass verification of server certificate when '
                     'making SSL connection to Swift.'),
    cfg.BoolOpt('backup_swift_service_auth',
                default=False,
                help='Send a X-Service-Token header with service auth '
                     'credentials. If enabled you also must set the '
                     'service_user group and enable send_service_user_token.'),
]

CONF = cfg.CONF
CONF.register_opts(swiftbackup_service_opts)
@interface.backupdriver
class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
    """Provides backup, restore and delete of backup objects within Swift."""

    def __init__(self, context):
        chunk_size_bytes = CONF.backup_swift_object_size
        sha_block_size_bytes = CONF.backup_swift_block_size
        backup_default_container = CONF.backup_swift_container
        enable_progress_timer = CONF.backup_swift_enable_progress_timer
        super().__init__(
            context,
            chunk_size_bytes,
            sha_block_size_bytes,
            backup_default_container,
            enable_progress_timer,
        )
        # Do not initialize the instance created when the backup service
        # starts up. The context will be missing information to do things
        # like fetching endpoints from the service catalog.
        if context and context.user_id:
            self.initialize()

    @staticmethod
    def get_driver_options():
        return swiftbackup_service_opts

    @retry(Exception, retries=CONF.backup_swift_retry_attempts,
           backoff_rate=CONF.backup_swift_retry_backoff)
    def _headers(self, headers=None):
        """Add service token to headers if it's enabled"""
        if not CONF.backup_swift_service_auth:
            return headers

        result = headers or {}

        sa_plugin = service_auth.get_service_auth_plugin()
        if sa_plugin is not None:
            sa_session = service_auth.get_service_session()
            result['X-Service-Token'] = sa_plugin.get_token(session=sa_session)

        return result

    def initialize(self):
        """Build the Swift connection for this request's context.

        Depending on backup_swift_auth, either authenticates as a single
        configured user against Keystone, or reuses the request user's
        token against a pre-authenticated Swift URL.
        """
        self.swift_attempts = CONF.backup_swift_retry_attempts
        self.swift_backoff = CONF.backup_swift_retry_backoff
        self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure
        if CONF.backup_swift_auth == 'single_user':
            if CONF.backup_swift_user is None:
                LOG.error("single_user auth mode enabled, "
                          "but %(param)s not set",
                          {'param': 'backup_swift_user'})
                raise exception.ParameterNotFound(param='backup_swift_user')
            if CONF.backup_swift_auth_url is None:
                # No explicit auth URL: look up Keystone in the service
                # catalog using keystone_catalog_info.
                self.auth_url = None
                info = CONF.keystone_catalog_info
                try:
                    service_type, service_name, endpoint_type = info.split(':')
                except ValueError:
                    raise exception.BackupDriverException(_(
                        "Failed to parse the configuration option "
                        "'keystone_catalog_info', must be in the form "
                        "::"))
                for entry in self.context.service_catalog:
                    if entry.get('type') == service_type:
                        # It is assumed that service_types are unique within
                        # the service catalog, so once the correct one is found
                        # it is safe to break out of the loop
                        self.auth_url = entry.get(
                            'endpoints')[0].get(endpoint_type)
                        break
            else:
                self.auth_url = CONF.backup_swift_auth_url
            if self.auth_url is None:
                raise exception.BackupDriverException(_(
                    "Could not determine which Keystone endpoint to use. This "
                    "can either be set in the service catalog or with the "
                    "cinder.conf config option 'backup_swift_auth_url'."))
            LOG.debug("Using auth URL %s", self.auth_url)
            LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_auth_url,
                      CONF.backup_swift_auth)

            os_options = {}
            if CONF.backup_swift_user_domain is not None:
                os_options['user_domain_name'] = CONF.backup_swift_user_domain
            if CONF.backup_swift_project_domain is not None:
                os_options['project_domain_name'] = (
                    CONF.backup_swift_project_domain
                )
            if CONF.backup_swift_project is not None:
                os_options['project_name'] = CONF.backup_swift_project
            self.conn = swift.Connection(
                authurl=self.auth_url,
                auth_version=CONF.backup_swift_auth_version,
                tenant_name=CONF.backup_swift_tenant,
                user=CONF.backup_swift_user,
                key=CONF.backup_swift_key,
                os_options=os_options,
                retries=self.swift_attempts,
                starting_backoff=self.swift_backoff,
                insecure=self.backup_swift_auth_insecure,
                cacert=CONF.backup_swift_ca_cert_file)
        else:
            if CONF.backup_swift_url is None:
                # No explicit Swift URL: look up Swift in the service
                # catalog using swift_catalog_info.
                self.swift_url = None
                info = CONF.swift_catalog_info
                try:
                    service_type, service_name, endpoint_type = info.split(':')
                except ValueError:
                    raise exception.BackupDriverException(_(
                        "Failed to parse the configuration option "
                        "'swift_catalog_info', must be in the form "
                        "::"))
                for entry in self.context.service_catalog:
                    if entry.get('type') == service_type:
                        # It is assumed that service_types are unique within
                        # the service catalog, so once the correct one is found
                        # it is safe to break out of the loop
                        self.swift_url = entry.get(
                            'endpoints')[0].get(endpoint_type)
                        break
            else:
                self.swift_url = '%s%s' % (CONF.backup_swift_url,
                                           self.context.project_id)
            if self.swift_url is None:
                raise exception.BackupDriverException(_(
                    "Could not determine which Swift endpoint to use. This "
                    "can either be set in the service catalog or with the "
                    "cinder.conf config option 'backup_swift_url'."))
            LOG.debug("Using swift URL %s", self.swift_url)
            LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_url,
                      CONF.backup_swift_auth)

            self.conn = swift.Connection(retries=self.swift_attempts,
                                         preauthurl=self.swift_url,
                                         preauthtoken=self.context.auth_token,
                                         starting_backoff=self.swift_backoff,
                                         insecure=(
                                             self.backup_swift_auth_insecure),
                                         cacert=CONF.backup_swift_ca_cert_file)

    class SwiftObjectWriter(object):
        """Buffers chunk data in memory and uploads it to Swift on close."""

        def __init__(self, container, object_name, conn, headers_func=None):
            self.container = container
            self.object_name = object_name
            self.conn = conn
            self.data = bytearray()
            self.headers_func = headers_func

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.close()

        def write(self, data):
            self.data += data

        def close(self):
            """Upload the buffered data and return its hex MD5 digest."""
            reader = io.BytesIO(self.data)
            try:
                headers = self.headers_func() if self.headers_func else None
                etag = self.conn.put_object(self.container, self.object_name,
                                            reader,
                                            content_length=len(self.data),
                                            headers=headers)
            except socket.error as err:
                raise exception.SwiftConnectionFailed(reason=err)
            # Swift returns the MD5 of what it stored as the etag; compare
            # it against the local digest to detect corruption in transit.
            md5 = hashlib.md5(self.data, usedforsecurity=False).hexdigest()
            if etag != md5:
                err = _('error writing object to swift, MD5 of object in '
                        'swift %(etag)s is not the same as MD5 of object sent '
                        'to swift %(md5)s') % {'etag': etag, 'md5': md5}
                raise exception.InvalidBackup(reason=err)
            return md5

    class SwiftObjectReader(object):
        """Reads one backup object back from Swift."""

        def __init__(self, container, object_name, conn, headers_func=None):
            self.container = container
            self.object_name = object_name
            self.conn = conn
            self.headers_func = headers_func

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            pass

        def read(self):
            try:
                headers = self.headers_func() if self.headers_func else None
                (_resp, body) = self.conn.get_object(self.container,
                                                     self.object_name,
                                                     headers=headers)
            except socket.error as err:
                raise exception.SwiftConnectionFailed(reason=err)
            return body

    def put_container(self, container):
        """Create the container if needed.

        Check if the container exist by issuing a HEAD request, if
        the container does not exist we create it.

        We cannot enforce a new storage policy on an
        existing container.
        """
        try:
            self.conn.head_container(container, headers=self._headers())
        except swift_exc.ClientException as e:
            if e.http_status == 404:
                try:
                    storage_policy = CONF.backup_swift_create_storage_policy
                    headers = ({'X-Storage-Policy': storage_policy}
                               if storage_policy else None)
                    self.conn.put_container(container,
                                            headers=self._headers(headers))
                except socket.error as err:
                    raise exception.SwiftConnectionFailed(reason=err)
                return
            LOG.warning("Failed to HEAD container to determine if it "
                        "exists and should be created.")
            raise exception.SwiftConnectionFailed(reason=e)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)

    def get_container_entries(self, container, prefix):
        """Get container entry names"""
        try:
            headers = self._headers()
            swift_objects = self.conn.get_container(container,
                                                    prefix=prefix,
                                                    full_listing=True,
                                                    headers=headers)[1]
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)
        swift_object_names = [swift_obj['name'] for swift_obj in swift_objects]
        return swift_object_names

    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Return a writer object.

        Returns a writer object that stores a chunk of volume data in a
        Swift object store.
        """
        return self.SwiftObjectWriter(container, object_name, self.conn,
                                      self._headers)

    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Return reader object.

        Returns a reader object that retrieves a chunk of backed-up volume data
        from a Swift object store.
        """
        return self.SwiftObjectReader(container, object_name, self.conn,
                                      self._headers)

    def delete_object(self, container, object_name):
        """Deletes a backup object from a Swift object store."""
        try:
            self.conn.delete_object(container, object_name,
                                    headers=self._headers())
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)
        except swift_exc.ClientException as err:
            # A 404 means the object is already gone, which is fine.
            if err.http_status != 404:
                raise

    def _generate_object_name_prefix(self, backup):
        """Generates a Swift backup object name prefix.

        prefix = volume_<volid>/<timestamp>/az_<saz>_backup_<bakid>
        """
        az = 'az_%s' % self.az
        backup_name = '%s_backup_%s' % (az, backup['id'])
        volume = 'volume_%s' % (backup['volume_id'])
        timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S")
        prefix = volume + '/' + timestamp + '/' + backup_name
        LOG.debug('generate_object_name_prefix: %s', prefix)
        return prefix

    def update_container_name(self, backup, container):
        """Use the container name as provided - don't update."""
        return container

    def get_extra_metadata(self, backup, volume):
        """Swift driver does not use any extra metadata."""
        return None

    def check_for_setup_error(self):
        # Here we are trying to connect to swift backend service
        # without any additional parameters.
        # At the moment of execution we don't have any user data
        # After just trying to do easiest operations, that will show
        # that we've configured swift backup driver in right way
        if not CONF.backup_swift_url:
            LOG.warning("We will use endpoints from keystone. It is "
                        "possible we could have problems because of it.")
            return
        conn = swift.Connection(retries=CONF.backup_swift_retry_attempts,
                                preauthurl=CONF.backup_swift_url,
                                cacert=CONF.backup_swift_ca_cert_file)
        try:
            conn.get_capabilities()
        # TODO(e0ne) catch less general exception
        except Exception:
            LOG.exception("Can not get Swift capabilities during backup "
                          "driver initialization.")
            raise
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/manager.py 0000664 0000000 0000000 00000161500 15131732575 0023070 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup manager manages volume backups.
Volume Backups are full copies of persistent volumes stored in a backup
store e.g. an object store or any other backup store if and when support is
added. They are usable without the original object being available. A
volume backup can be restored to the original volume it was created from or
any other available volume with a minimum size of the original volume.
Volume backups can be created, restored, deleted and listed.
**Related Flags**
:backup_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.backup.manager.Manager`).
"""
import contextlib
import os
import typing
from castellan import key_manager
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.keymgr import migration as key_migration
from cinder import manager
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
# Configuration options for the backup manager service.
backup_manager_opts = [
    cfg.StrOpt('backup_driver',
               default='cinder.backup.drivers.swift.SwiftBackupDriver',
               help='Driver to use for backups.',),
    cfg.IntOpt('backup_driver_init_check_interval',
               default=60,
               min=5,
               help='Time in seconds between checks to see if the backup '
                    'driver has been successfully initialized, any time '
                    'the driver is restarted.'),
    cfg.IntOpt('backup_driver_stats_polling_interval',
               default=60,
               min=10,
               deprecated_name='backup_driver_status_check_interval',
               help='Time in seconds between checks of the backup driver '
                    'status. If does not report as working, it is '
                    'restarted.'),
    cfg.BoolOpt('backup_service_inithost_offload',
                default=True,
                help='Offload pending backup delete during '
                     'backup service startup. If false, the backup service '
                     'will remain down until all pending backups are '
                     'deleted.',),
    cfg.IntOpt('backup_native_threads_pool_size',
               default=60,
               min=20,
               help='Size of the native threads pool for the backups. '
                    'Most backup drivers rely heavily on this, it can be '
                    'decreased for specific drivers that don\'t.'),
]

CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
CONF.import_opt('use_multipath_for_image_xfer', 'cinder.volume.driver')
CONF.import_opt('num_volume_device_scan_tries', 'cinder.volume.driver')
QUOTAS = quota.QUOTAS

# Map of deprecated backup driver paths to their current replacements;
# consulted in BackupManager.__init__ before importing the driver class.
MAPPING = {
    # Module name "google" conflicts with google library namespace inside the
    # driver when it imports google.auth
    'cinder.backup.drivers.google.GoogleBackupDriver':
    'cinder.backup.drivers.gcs.GoogleBackupDriver',
}

# Process group id used to scope the inter-process cleanup lock; empty
# string on Windows where os.getpgrp() is unavailable.
SERVICE_PGRP = '' if os.name == 'nt' else os.getpgrp()

# TODO(geguileo): Once Eventlet issue #432 gets fixed we can just tpool.execute
# the whole call to the driver's backup and restore methods instead of proxy
# wrapping the device_file and having the drivers also proxy wrap their
# writes/reads and the compression/decompression calls.
# (https://github.com/eventlet/eventlet/issues/432)
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
RPC_API_VERSION = backup_rpcapi.BackupAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, *args, **kwargs):
        # Availability zone this backup service reports for its backups.
        self.az = CONF.storage_availability_zone
        self.backup_rpcapi = backup_rpcapi.BackupAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        super(BackupManager, self).__init__(*args, **kwargs)
        self.is_initialized = False
        self._set_tpool_size(CONF.backup_native_threads_pool_size)
        # Ordinal of this worker process (1-based); only process #1 runs
        # the incomplete-backup cleanup on startup.
        self._process_number = kwargs.get('process_number', 1)
        # Concurrency limiter; contextlib.suppress() acts as a no-op
        # context manager when no semaphore was supplied.
        self._semaphore = kwargs.get('semaphore', contextlib.suppress())
        self.driver_name = CONF.backup_driver
        if self.driver_name in MAPPING:
            new_name = MAPPING[self.driver_name]
            LOG.warning('Backup driver path %s is deprecated, update your '
                        'configuration to the new path %s',
                        self.driver_name, new_name)
            self.driver_name = new_name
        # NOTE: this is the driver *class*; it is instantiated with a
        # context per operation (see _setup_backup_driver).
        self.service = importutils.import_class(self.driver_name)
        self.message_api = message_api.API()
    @typing.no_type_check
    def init_host(self, **kwargs):
        """Run initialization needed for a standalone service."""
        ctxt = context.get_admin_context()

        self.setup_backup_backend(ctxt)

        try:
            self._cleanup_incomplete_backup_operations(ctxt)
        except Exception:
            # Don't block startup of the backup service.
            LOG.exception("Problem cleaning incomplete backup operations.")

        # Migrate any ConfKeyManager keys based on fixed_key to the currently
        # configured key manager.
        backups = objects.BackupList.get_all_by_host(ctxt, self.host)
        self._add_to_threadpool(key_migration.migrate_fixed_key,
                                backups=backups)
        self.publish_service_capabilities(ctxt)
def _setup_backup_driver(self, ctxt):
backup_service = self.service(context=ctxt)
backup_service.check_for_setup_error()
self.is_initialized = True
raise loopingcall.LoopingCallDone()
def setup_backup_backend(self, ctxt):
try:
init_loop = loopingcall.FixedIntervalLoopingCall(
self._setup_backup_driver, ctxt)
init_loop.start(interval=CONF.backup_driver_init_check_interval)
except loopingcall.LoopingCallDone:
LOG.info("Backup driver was successfully initialized.")
except Exception:
LOG.exception("Failed to initialize driver.",
resource={'type': 'driver',
'id': self.__class__.__name__})
def reset(self):
super(BackupManager, self).reset()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
    @utils.synchronized('cleanup_incomplete_backups_%s' % SERVICE_PGRP,
                        external=True, delay=0.1)
    def _cleanup_incomplete_backup_operations(self, ctxt):
        """Reset backups this host left in a transient state.

        Serialized across this service's worker processes with an external
        lock keyed on the process group id.
        """
        # Only the first launched process should do the cleanup, the others
        # have waited on the lock for the first one to finish the cleanup and
        # can now continue with the start process.
        if self._process_number != 1:
            LOG.debug("Process #%s %sskips cleanup.",
                      self._process_number,
                      '(pgid=%s) ' % SERVICE_PGRP if SERVICE_PGRP else '')
            return

        LOG.info("Cleaning up incomplete backup operations.")

        # TODO(smulcahy) implement full resume of backup and restore
        # operations on restart (rather than simply resetting).
        # We only need to deal with the backups that aren't complete.
        # N.B. NULL status is possible and we consider it incomplete.
        incomplete_status = list(fields.BackupStatus.ALL)
        incomplete_status.remove(fields.BackupStatus.AVAILABLE)
        incomplete_status.append(None)
        backups = objects.BackupList.get_all(
            ctxt, filters={'host': self.host, 'status': incomplete_status})
        for backup in backups:
            # Each backup is cleaned best-effort; a failure on one must not
            # prevent cleanup of the rest.
            try:
                self._cleanup_one_backup(ctxt, backup)
            except Exception:
                LOG.exception("Problem cleaning up backup %(bkup)s.",
                              {'bkup': backup['id']})
            try:
                self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
                                                                    backup)
            except Exception:
                LOG.exception("Problem cleaning temp volumes and "
                              "snapshots for backup %(bkup)s.",
                              {'bkup': backup['id']})
def _cleanup_one_volume(self, ctxt, volume_id):
try:
volume = objects.Volume.get_by_id(ctxt, volume_id)
except exception.VolumeNotFound:
LOG.info('Volume %s does not exist anymore. Ignoring.', volume_id)
return
if volume['status'] == 'backing-up':
self._detach_all_attachments(ctxt, volume)
LOG.info('Resetting volume %(vol_id)s to previous '
'status %(status)s (was backing-up).',
{'vol_id': volume['id'],
'status': volume['previous_status']})
self.db.volume_update(ctxt, volume['id'],
{'status': volume['previous_status']})
elif volume['status'] == 'restoring-backup':
self._detach_all_attachments(ctxt, volume)
LOG.info('Setting volume %s to error_restoring '
'(was restoring-backup).', volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
def _cleanup_one_snapshot(self, ctxt, snapshot_id):
try:
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
except exception.SnapshotNotFound:
LOG.info('Snapshot %s does not exist anymore. Ignoring.',
snapshot_id)
return
if snapshot['status'] == 'backing-up':
LOG.info('Resetting snapshot %(snap_id)s to previous '
'status %(status)s (was backing-up).',
{'snap_id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE})
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
    def _cleanup_one_backup(self, ctxt, backup):
        """Move one interrupted backup to a stable terminal status.

        creating  -> error (and the source volume/snapshot is reset)
        restoring -> available (and the destination volume is reset)
        deleting  -> the delete is resumed, except for backups of
                     encrypted volumes which go to error_deleting
        """
        if backup['status'] == fields.BackupStatus.CREATING:
            LOG.info('Resetting backup %s to error (was creating).',
                     backup['id'])

            self._cleanup_one_volume(ctxt, backup.volume_id)
            if backup.snapshot_id:
                self._cleanup_one_snapshot(ctxt, backup.snapshot_id)

            err = 'incomplete backup reset on manager restart'
            volume_utils.update_backup_error(backup, err)
        elif backup['status'] == fields.BackupStatus.RESTORING:
            LOG.info('Resetting backup %s to '
                     'available (was restoring).',
                     backup['id'])
            self._cleanup_one_volume(ctxt, backup.restore_volume_id)
            if backup.snapshot_id:
                self._cleanup_one_snapshot(ctxt, backup.snapshot_id)

            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
        elif backup['status'] == fields.BackupStatus.DELETING:
            # Don't resume deleting the backup of an encrypted volume. The
            # admin context won't be sufficient to delete the backup's copy
            # of the encryption key ID (a real user context is required).
            if backup.encryption_key_id is None:
                LOG.info('Resuming delete on backup: %s.', backup.id)
                if CONF.backup_service_inithost_offload:
                    # Offload all the pending backup delete operations to the
                    # threadpool to prevent the main backup service thread
                    # from being blocked.
                    self._add_to_threadpool(self.delete_backup, ctxt, backup)
                else:
                    # Delete backups sequentially
                    self.delete_backup(ctxt, backup)
            else:
                LOG.info('Unable to resume deleting backup of an encrypted '
                         'volume, resetting backup %s to error_deleting '
                         '(was deleting).',
                         backup.id)
                backup.status = fields.BackupStatus.ERROR_DELETING
                backup.save()
def _detach_all_attachments(self, ctxt, volume):
attachments = volume['volume_attachment'] or []
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
try:
rpcapi = self.volume_rpcapi
rpcapi.detach_volume(ctxt, volume, attachment['id'])
except Exception:
LOG.exception("Detach attachment %(attach_id)s failed.",
{'attach_id': attachment['id']},
resource=volume)
def _delete_temp_volume(self, ctxt, backup):
try:
temp_volume = objects.Volume.get_by_id(
ctxt, backup.temp_volume_id)
self.volume_rpcapi.delete_volume(ctxt, temp_volume)
except exception.VolumeNotFound:
LOG.debug("Could not find temp volume %(vol)s to clean up "
"for backup %(backup)s.",
{'vol': backup.temp_volume_id,
'backup': backup.id})
backup.temp_volume_id = None
backup.save()
def _delete_temp_snapshot(self, ctxt, backup):
try:
temp_snapshot = objects.Snapshot.get_by_id(
ctxt, backup.temp_snapshot_id)
# We may want to consider routing those calls through the
# cinder API.
temp_snapshot.status = fields.SnapshotStatus.DELETING
temp_snapshot.save()
self.volume_rpcapi.delete_snapshot(ctxt, temp_snapshot)
except exception.SnapshotNotFound:
LOG.debug("Could not find temp snapshot %(snap)s to clean "
"up for backup %(backup)s.",
{'snap': backup.temp_snapshot_id,
'backup': backup.id})
backup.temp_snapshot_id = None
backup.save()
def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
# NOTE(xyang): If the service crashes or gets restarted during the
# backup operation, there could be temporary volumes or snapshots
# that are not deleted. Make sure any temporary volumes or snapshots
# create by the backup job are deleted when service is started.
if (backup.temp_volume_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_volume(ctxt, backup)
if (backup.temp_snapshot_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_snapshot(ctxt, backup)
def _cleanup_temp_volumes_snapshots_when_backup_created(
self, ctxt, backup):
# Delete temp volumes or snapshots when backup creation is completed.
if backup.temp_volume_id:
self._delete_temp_volume(ctxt, backup)
if backup.temp_snapshot_id:
self._delete_temp_snapshot(ctxt, backup)
    @utils.limit_operations
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service.

        Validates that the source (volume or snapshot) and the backup
        record are in the expected states, then kicks off the asynchronous
        flow: _start_backup -> volume manager get_backup_device ->
        continue_backup -> _finish_backup.

        :param context: RequestContext for the backup operation
        :param backup: Backup object to populate from the source
        :raises InvalidSnapshot: source snapshot not in 'backing-up'
        :raises InvalidVolume: source volume not in 'backing-up'
        :raises InvalidBackup: backup not in 'creating', or service down
        """
        volume_id = backup.volume_id
        snapshot_id = backup.snapshot_id
        volume = objects.Volume.get_by_id(context, volume_id)
        snapshot = objects.Snapshot.get_by_id(
            context, snapshot_id) if snapshot_id else None
        previous_status = volume.get('previous_status', None)
        # Stash resource info on the context so the message_api calls
        # below can build user-visible messages without extra arguments.
        context.message_resource_id = backup.id
        context.message_resource_type = message_field.Resource.VOLUME_BACKUP
        context.message_action = message_field.Action.BACKUP_CREATE
        if snapshot_id:
            log_message = ('Create backup started, backup: %(backup_id)s '
                           'volume: %(volume_id)s snapshot: %(snapshot_id)s.'
                           % {'backup_id': backup.id,
                              'volume_id': volume_id,
                              'snapshot_id': snapshot_id})
        else:
            log_message = ('Create backup started, backup: %(backup_id)s '
                           'volume: %(volume_id)s.'
                           % {'backup_id': backup.id,
                              'volume_id': volume_id})
        LOG.info(log_message)

        self._notify_about_backup_usage(context, backup, "create.start")

        # The API put the source into 'backing-up' before casting to us;
        # anything else means a concurrent state change aborted the job.
        expected_status = "backing-up"
        if snapshot:
            actual_status = snapshot['status']
            if actual_status != expected_status:
                err = _('Create backup aborted, expected snapshot status '
                        '%(expected_status)s but got %(actual_status)s.') % {
                    'expected_status': expected_status,
                    'actual_status': actual_status,
                }
                volume_utils.update_backup_error(backup, err)
                raise exception.InvalidSnapshot(reason=err)
        else:
            actual_status = volume['status']
            if actual_status != expected_status:
                err = _('Create backup aborted, expected volume status '
                        '%(expected_status)s but got %(actual_status)s.') % {
                    'expected_status': expected_status,
                    'actual_status': actual_status,
                }
                volume_utils.update_backup_error(backup, err)
                raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.CREATING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                'expected_status': expected_status,
                'actual_status': actual_status,
            }
            volume_utils.update_backup_error(backup, err)
            self.message_api.create_from_request_context(
                context,
                detail=message_field.Detail.BACKUP_INVALID_STATE)
            raise exception.InvalidBackup(reason=err)

        try:
            if not self.is_working():
                err = _('Create backup aborted due to backup service is down.')
                volume_utils.update_backup_error(backup, err)
                self.message_api.create_from_request_context(
                    context,
                    detail=message_field.Detail.BACKUP_SERVICE_DOWN)
                raise exception.InvalidBackup(reason=err)
            if not backup.availability_zone:
                backup.availability_zone = self.az
            backup.service = self.driver_name
            backup.save()

            # Start backup, then continue_backup, then finish_backup
            self._start_backup(context, backup, volume)
        except Exception as err:
            # Roll back: release the source back to its pre-backup status
            # and record the failure on the backup, then re-raise.
            with excutils.save_and_reraise_exception():
                if snapshot_id:
                    assert snapshot is not None
                    snapshot.status = fields.SnapshotStatus.AVAILABLE
                    snapshot.save()
                else:
                    try:
                        self.db.volume_update(
                            context, volume_id,
                            {'status': previous_status,
                             'previous_status': 'error_backing-up'})
                    except exception.VolumeNotFound:
                        # If the volume was deleted we cannot update its
                        # status but we still want to set the backup to error.
                        pass
                volume_utils.update_backup_error(backup, str(err))
def _start_backup(self, context, backup, volume):
"""This starts the backup process.
First we have to get the backup device from the volume manager.
This can take a long time to complete. Once the volume manager
is done creating/getting the backup device, then we get a callback
to complete the process of backing up the volume.
"""
# Save a copy of the encryption key ID in case the volume is deleted.
if (volume.encryption_key_id is not None and
backup.encryption_key_id is None):
backup.encryption_key_id = volume_utils.clone_encryption_key(
context,
key_manager.API(CONF),
volume.encryption_key_id)
backup.save()
# This is an async call to the volume manager. We will get a
# callback from the volume manager to continue once it's done.
LOG.info("Call Volume Manager to get_backup_device for %s", backup)
self.volume_rpcapi.get_backup_device(context, backup, volume)
    def continue_backup(self, context, backup, backup_device):
        """This is the callback from the volume manager to continue.

        The volume manager calls this once it has prepared
        ``backup_device`` (the volume itself or a temporary
        snapshot/clone). The device is attached locally, streamed to the
        backup driver, then detached; _finish_backup() completes the
        bookkeeping. On any failure the source is rolled back to its
        previous status and the backup is marked in error.
        """
        # Tracks whether a user-visible message was already created, so
        # the nested handlers don't emit duplicates for one failure.
        message_created = False
        volume_id = backup.volume_id
        volume = objects.Volume.get_by_id(context, volume_id)
        snapshot_id = backup.snapshot_id
        snapshot = objects.Snapshot.get_by_id(
            context, snapshot_id) if snapshot_id else None
        previous_status = volume.get('previous_status', None)

        backup_service = self.service(context)
        properties = volume_utils.brick_get_connector_properties(
            CONF.use_multipath_for_image_xfer, enforce_multipath=False)

        updates = {}
        try:
            try:
                attach_info = self._attach_device(context,
                                                  backup_device.device_obj,
                                                  properties,
                                                  backup_device.is_snapshot)
            except Exception:
                with excutils.save_and_reraise_exception():
                    if not message_created:
                        message_created = True
                        self.message_api.create_from_request_context(
                            context,
                            detail=message_field.Detail.ATTACH_ERROR)
            try:
                device_path = attach_info['device']['path']
                if (isinstance(device_path, str) and
                        not os.path.isdir(device_path)):
                    # Block/file device: hand the driver a file object,
                    # proxy-wrapped so blocking I/O runs in native threads.
                    if backup_device.secure_enabled:
                        with open(device_path, 'rb') as device_file:
                            updates = backup_service.backup(
                                backup, tpool.Proxy(device_file))
                    else:
                        with utils.temporary_chown(device_path):
                            with open(device_path, 'rb') as device_file:
                                updates = backup_service.backup(
                                    backup, tpool.Proxy(device_file))
                # device_path is already file-like so no need to open it
                else:
                    updates = backup_service.backup(backup,
                                                    tpool.Proxy(device_path))
            except Exception:
                with excutils.save_and_reraise_exception():
                    if not message_created:
                        message_created = True
                        self.message_api.create_from_request_context(
                            context,
                            detail=
                            message_field.Detail.BACKUP_CREATE_DRIVER_ERROR)
            finally:
                # Always detach, even when the driver failed above.
                try:
                    self._detach_device(context, attach_info,
                                        backup_device.device_obj, properties,
                                        backup_device.is_snapshot, force=True,
                                        ignore_errors=True)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        if not message_created:
                            message_created = True
                            self.message_api.create_from_request_context(
                                context,
                                detail=
                                message_field.Detail.DETACH_ERROR)
        except Exception as err:
            # Roll the source back to its pre-backup status and record the
            # failure on the backup, then re-raise the original exception.
            with excutils.save_and_reraise_exception():
                if snapshot:
                    snapshot.status = fields.SnapshotStatus.AVAILABLE
                    snapshot.save()
                else:
                    self.db.volume_update(
                        context, volume_id,
                        {'status': previous_status,
                         'previous_status': 'error_backing-up'})
                volume_utils.update_backup_error(backup, str(err))
        finally:
            # Refresh even if the backup was deleted meanwhile, so the
            # temp-resource IDs and latest status are visible below.
            with backup.as_read_deleted():
                backup.refresh()
            try:
                self._cleanup_temp_volumes_snapshots_when_backup_created(
                    context, backup)
            except Exception:
                with excutils.save_and_reraise_exception():
                    if not message_created:
                        self.message_api.create_from_request_context(
                            context,
                            detail=
                            message_field.Detail.BACKUP_CREATE_CLEANUP_ERROR)

        self._finish_backup(context, backup, volume, updates)
    def _finish_backup(self, context, backup, volume, updates):
        """Finalize a backup: restore source status, mark backup available.

        :param updates: dict of backup fields returned by the driver's
                        backup() call to persist on the backup record
        """
        volume_id = backup.volume_id
        snapshot_id = backup.snapshot_id
        previous_status = volume.get('previous_status', None)

        # Restore the original status.
        if snapshot_id:
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.AVAILABLE})
        else:
            self.db.volume_update(context, volume_id,
                                  {'status': previous_status,
                                   'previous_status': 'backing-up'})

        # continue_backup method above updated the status for the backup, so
        # it will reflect latest status, even if it is deleted
        completion_msg = 'finished'
        if backup.status in (fields.BackupStatus.DELETING,
                             fields.BackupStatus.DELETED):
            # Backup was deleted while we were writing it; don't resurrect.
            completion_msg = 'aborted'
        else:
            backup.status = fields.BackupStatus.AVAILABLE
            backup.size = volume['size']

            if updates:
                backup.update(updates)
            backup.save()

            # Handle the num_dependent_backups of parent backup when child
            # backup has created successfully.
            if backup.parent_id:
                parent_backup = objects.Backup.get_by_id(context,
                                                         backup.parent_id)
                parent_backup.num_dependent_backups += 1
                parent_backup.save()
        LOG.info('Create backup %s. backup: %s.', completion_msg, backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
def _is_our_backup(self, backup):
# Accept strings and Service OVO
if not isinstance(backup, str):
backup = backup.service
if not backup:
return True
# TODO(tommylikehu): We upgraded the 'driver_name' from module
# to class name, so we use 'in' here to match two namings,
# this can be replaced with equal sign during next
# release (Rocky).
if self.driver_name.startswith(backup):
return True
# We support renaming of drivers, so check old names as well
for key, value in MAPPING.items():
if key.startswith(backup) and self.driver_name.startswith(value):
return True
return False
@utils.limit_operations
def restore_backup(self, context, backup, volume_id, volume_is_new):
"""Restore volume backups from configured backup service.
:param context: RequestContext for the restore operation
:param backup: Backup that we're restoring
:param volume_id: The ID of the volume into which we're restoring
:param volume_is_new: The volume does not have stale data, so
sparse backups can be restored as such.
"""
context.message_resource_id = backup.id
context.message_resource_type = message_field.Resource.VOLUME_BACKUP
context.message_action = message_field.Action.BACKUP_RESTORE
LOG.info('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.',
{'backup_id': backup.id, 'volume_id': volume_id})
volume = objects.Volume.get_by_id(context, volume_id)
self._notify_about_backup_usage(context, backup, "restore.start")
expected_status = [fields.VolumeStatus.RESTORING_BACKUP,
fields.VolumeStatus.CREATING]
volume_previous_status = volume['status']
if volume_previous_status not in expected_status:
err = (_('Restore backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': ','.join(expected_status),
'actual_status': volume_previous_status})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(
context, volume_id,
{'status':
(fields.VolumeStatus.ERROR if
volume_previous_status == fields.VolumeStatus.CREATING else
fields.VolumeStatus.ERROR_RESTORING)})
self.message_api.create(
context,
action=message_field.Action.BACKUP_RESTORE,
resource_type=message_field.Resource.VOLUME_BACKUP,
resource_uuid=volume.id,
detail=message_field.Detail.VOLUME_INVALID_STATE)
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.RESTORING
actual_status = backup['status']
if actual_status != expected_status:
err = (_('Restore backup aborted: expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
volume_utils.update_backup_error(backup, err)
self.db.volume_update(context, volume_id,
{'status': fields.VolumeStatus.ERROR})
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_INVALID_STATE)
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is '
'larger than backup: %(backup_id)s, '
'size: %(backup_size)d, continuing with restore.',
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
'backup_size': backup['size']})
if not self._is_our_backup(backup):
err = _('Restore backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
'configured_service': self.driver_name,
'backup_service': backup.service,
}
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(context, volume_id,
{'status': fields.VolumeStatus.ERROR})
raise exception.InvalidBackup(reason=err)
canceled = False
try:
self._run_restore(context, backup, volume, volume_is_new)
except exception.BackupRestoreCancel:
canceled = True
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(
context, volume_id,
{'status': (fields.VolumeStatus.ERROR if
actual_status == fields.VolumeStatus.CREATING
else fields.VolumeStatus.ERROR_RESTORING)})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
if canceled:
volume.status = fields.VolumeStatus.ERROR
else:
volume.status = fields.VolumeStatus.AVAILABLE
# NOTE(tommylikehu): If previous status is 'creating', this is
# just a new created volume and we need update the 'launched_at'
# attribute as well.
if volume_previous_status == fields.VolumeStatus.CREATING:
volume['launched_at'] = timeutils.utcnow()
old_src_backup_id = self.db.volume_metadata_get(
context, volume_id).get("src_backup_id", None)
if backup.volume_id != volume.id or (
old_src_backup_id and old_src_backup_id != backup.id):
self.db.volume_metadata_update(
context,
volume.id,
{'src_backup_id': backup.id},
False)
volume.save()
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
LOG.info('%(result)s restoring backup %(backup_id)s to volume '
'%(volume_id)s.',
{'result': 'Canceled' if canceled else 'Finished',
'backup_id': backup.id,
'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
    def _run_restore(self, context, backup, volume, volume_is_new):
        """Attach *volume* locally and let the backup driver restore into it.

        Afterwards performs encryption-key housekeeping so the restored
        volume's key ID (if any) is unique and any previous key is deleted.
        """
        # Tracks whether a user-visible message was already created, so the
        # detach handler doesn't emit a duplicate for the same failure.
        message_created = False
        orig_key_id = volume.encryption_key_id
        backup_service = self.service(context)

        properties = volume_utils.brick_get_connector_properties(
            CONF.use_multipath_for_image_xfer, enforce_multipath=False)
        secure_enabled = (
            self.volume_rpcapi.secure_file_operations_enabled(context,
                                                              volume))
        try:
            attach_info = self._attach_device(context, volume, properties)
        except Exception:
            self.message_api.create_from_request_context(
                context,
                detail=message_field.Detail.ATTACH_ERROR)
            raise

        # NOTE(geguileo): Not all I/O disk operations properly do greenthread
        # context switching and may end up blocking the greenthread, so we go
        # with native threads proxy-wrapping the device file object.
        try:
            device_path = attach_info['device']['path']
            # NOTE(review): 'nt' is Windows; presumably kept for legacy
            # Windows support — confirm before removing.
            open_mode = 'rb+' if os.name == 'nt' else 'wb'
            if (isinstance(device_path, str) and
                    not os.path.isdir(device_path)):
                if secure_enabled:
                    with open(device_path, open_mode) as device_file:
                        backup_service.restore(backup, volume.id,
                                               tpool.Proxy(device_file),
                                               volume_is_new)
                else:
                    with utils.temporary_chown(device_path):
                        with open(device_path, open_mode) as device_file:
                            backup_service.restore(backup, volume.id,
                                                   tpool.Proxy(device_file),
                                                   volume_is_new)
            # device_path is already file-like so no need to open it
            else:
                backup_service.restore(backup, volume.id,
                                       tpool.Proxy(device_path),
                                       volume_is_new)
        except exception.BackupRestoreCancel:
            # Cancellation is handled by the caller; no error message here.
            raise
        except Exception:
            LOG.exception('Restoring backup %(backup_id)s to volume '
                          '%(volume_id)s failed.', {'backup_id': backup.id,
                                                    'volume_id': volume.id})
            # We set message_create to True before creating the
            # message because if the message create call fails
            # and is caught by the base/outer exception handler
            # then we will end up storing a wrong message
            message_created = True
            self.message_api.create_from_request_context(
                context,
                detail=message_field.Detail.BACKUP_RESTORE_ERROR)
            raise
        finally:
            try:
                self._detach_device(context, attach_info, volume, properties,
                                    force=True)
            except Exception:
                if not message_created:
                    self.message_api.create_from_request_context(
                        context,
                        detail=message_field.Detail.DETACH_ERROR)
                raise

        # Regardless of whether the restore was successful, do some
        # housekeeping to ensure the restored volume's encryption key ID is
        # unique, and any previous key ID is deleted. Start by fetching fresh
        # info on the restored volume.
        restored_volume = objects.Volume.get_by_id(context, volume.id)
        restored_key_id = restored_volume.encryption_key_id
        if restored_key_id != orig_key_id:
            LOG.info('Updating encryption key ID for volume %(volume_id)s '
                     'from backup %(backup_id)s.',
                     {'volume_id': volume.id, 'backup_id': backup.id})

            key_mgr = key_manager.API(CONF)
            if orig_key_id:
                LOG.debug('Deleting original volume encryption key ID.')
                volume_utils.delete_encryption_key(context,
                                                   key_mgr,
                                                   orig_key_id)

            if backup.encryption_key_id is None:
                # This backup predates the current code that stores the cloned
                # key ID in the backup database. Fortunately, the key ID
                # restored from the backup data _is_ a clone of the original
                # volume's key ID, so grab it.
                LOG.debug('Gleaning backup encryption key ID from metadata.')
                backup.encryption_key_id = restored_key_id
                backup.save()

            # Clone the key ID again to ensure every restored volume has
            # a unique key ID. The volume's key ID should not be the same
            # as the backup.encryption_key_id (the copy made when the backup
            # was first created).
            new_key_id = volume_utils.clone_encryption_key(
                context,
                key_mgr,
                backup.encryption_key_id)
            restored_volume.encryption_key_id = new_key_id
            restored_volume.save()
        else:
            LOG.debug('Encryption key ID for volume %(volume_id)s already '
                      'matches encryption key ID in backup %(backup_id)s.',
                      {'volume_id': volume.id, 'backup_id': backup.id})
    def delete_backup(self, context, backup):
        """Delete volume backup from configured backup service.

        :param context: running context
        :param backup: Backup object to delete (must be in 'deleting')
        :raises InvalidBackup: status mismatch, service down, or the
                               backup was created by a different driver
        """
        LOG.info('Delete backup started, backup: %s.', backup.id)

        self._notify_about_backup_usage(context, backup, "delete.start")
        context.message_resource_id = backup.id
        context.message_resource_type = message_field.Resource.VOLUME_BACKUP
        context.message_action = message_field.Action.BACKUP_DELETE

        expected_status = fields.BackupStatus.DELETING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Delete_backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') \
                % {'expected_status': expected_status,
                   'actual_status': actual_status}
            volume_utils.update_backup_error(backup, err)
            self.message_api.create_from_request_context(
                context,
                detail=message_field.Detail.BACKUP_INVALID_STATE)
            raise exception.InvalidBackup(reason=err)

        # An empty backup.service means the driver never wrote any data,
        # so there is nothing on the backend that needs a working driver.
        if backup.service and not self.is_working():
            err = _('Delete backup is aborted due to backup service is down.')
            status = fields.BackupStatus.ERROR_DELETING
            volume_utils.update_backup_error(backup, err, status)
            self.message_api.create_from_request_context(
                context,
                detail=message_field.Detail.BACKUP_SERVICE_DOWN)
            raise exception.InvalidBackup(reason=err)

        if not self._is_our_backup(backup):
            err = _('Delete backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s].')\
                % {'configured_service': self.driver_name,
                   'backup_service': backup.service}
            volume_utils.update_backup_error(backup, err)
            raise exception.InvalidBackup(reason=err)

        if backup.service:
            try:
                backup_service = self.service(context)
                backup_service.delete_backup(backup)
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    volume_utils.update_backup_error(backup, str(err))
                    self.message_api.create_from_request_context(
                        context,
                        detail=message_field.Detail.BACKUP_DELETE_DRIVER_ERROR)

        # Get reservations
        # Negative amounts release quota; committed only after destroy()
        # below succeeds.
        try:
            reserve_opts = {
                'backups': -1,
                'backup_gigabytes': -backup.size,
            }
            reservations = QUOTAS.reserve(context,
                                          project_id=backup.project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception("Failed to update usages deleting backup")

        if backup.encryption_key_id is not None:
            volume_utils.delete_encryption_key(context,
                                               key_manager.API(CONF),
                                               backup.encryption_key_id)
            backup.encryption_key_id = None
            backup.save()

        backup.destroy()
        # If this backup is incremental backup, handle the
        # num_dependent_backups of parent backup
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(context,
                                                     backup.parent_id)
            if parent_backup.has_dependent_backups:
                parent_backup.num_dependent_backups -= 1
            parent_backup.save()

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations,
                          project_id=backup.project_id)

        LOG.info('Delete backup finished, backup %s deleted.', backup.id)
        self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
context,
backup,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_backup_usage(
context, backup, event_suffix,
extra_usage_info=extra_usage_info,
host=self.host)
def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
Export backup metadata so it could be re-imported into the database
without any prerequisite in the backup database.
:param context: running context
:param backup: backup object to export
:returns: backup_record - a description of how to import the backup
:returns: contains 'backup_url' - how to import the backup, and
:returns: 'backup_service' describing the needed driver.
:raises InvalidBackup:
"""
LOG.info('Export record started, backup: %s.', backup.id)
expected_status = fields.BackupStatus.AVAILABLE
actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
raise exception.InvalidBackup(reason=err)
backup_record = {'backup_service': backup.service}
if not self._is_our_backup(backup):
err = (_('Export record aborted, the backup service currently '
'configured [%(configured_service)s] is not the '
'backup service that was used to create this '
'backup [%(backup_service)s].') %
{'configured_service': self.driver_name,
'backup_service': backup.service})
raise exception.InvalidBackup(reason=err)
# Call driver to create backup description string
try:
backup_service = self.service(context)
driver_info = backup_service.export_record(backup)
backup_url = backup.encode_record(driver_info=driver_info)
backup_record['backup_url'] = backup_url
except Exception as err:
msg = str(err)
raise exception.InvalidBackup(reason=msg)
LOG.info('Export record finished, backup %s exported.', backup.id)
return backup_record
    def import_record(self,
                      context,
                      backup,
                      backup_service,
                      backup_url,
                      backup_hosts):
        """Import all volume backup metadata details to the backup db.

        :param context: running context
        :param backup: The new backup object for the import
        :param backup_service: The needed backup driver for import
        :param backup_url: An identifier string to locate the backup
        :param backup_hosts: Potential hosts to execute the import
        :raises InvalidBackup:
        :raises ServiceNotFound:
        """
        LOG.info('Import record started, backup_url: %s.', backup_url)

        # Can we import this backup?
        if not self._is_our_backup(backup_service):
            # No, are there additional potential backup hosts in the list?
            if len(backup_hosts) > 0:
                # try the next host on the list, maybe he can import
                first_host = backup_hosts.pop()
                self.backup_rpcapi.import_record(context,
                                                 first_host,
                                                 backup,
                                                 backup_service,
                                                 backup_url,
                                                 backup_hosts)
            else:
                # empty list - we are the last host on the list, fail
                err = _('Import record failed, cannot find backup '
                        'service to perform the import. Request service '
                        '%(service)s.') % {'service': backup_service}
                volume_utils.update_backup_error(backup, err)
                raise exception.ServiceNotFound(service_id=backup_service)
        else:
            # Yes...
            try:
                # Deserialize backup record information
                backup_options = backup.decode_record(backup_url)

                # Extract driver specific info and pass it to the driver
                driver_options = backup_options.pop('driver_info', {})
                backup_service = self.service(context)
                backup_service.import_record(backup, driver_options)
            except Exception as err:
                msg = str(err)
                volume_utils.update_backup_error(backup, msg)
                raise exception.InvalidBackup(reason=msg)

            # Minimum set of fields a decoded record must carry for the
            # resulting backup row to be usable.
            required_import_options = {
                'display_name',
                'display_description',
                'container',
                'size',
                'service_metadata',
                'object_count',
                'id'
            }

            # Check for missing fields in imported data
            missing_opts = required_import_options - set(backup_options)
            if missing_opts:
                msg = (_('Driver successfully decoded imported backup data, '
                         'but there are missing fields (%s).') %
                       ', '.join(missing_opts))
                volume_utils.update_backup_error(backup, msg)
                raise exception.InvalidBackup(reason=msg)

            # Confirm the ID from the record in the DB is the right one
            backup_id = backup_options['id']
            if backup_id != backup.id:
                msg = (_('Trying to import backup metadata from id %(meta_id)s'
                         ' into backup %(id)s.') %
                       {'meta_id': backup_id, 'id': backup.id})
                volume_utils.update_backup_error(backup, msg)
                raise exception.InvalidBackup(reason=msg)

            # Overwrite some fields
            backup_options['service'] = self.driver_name
            backup_options['availability_zone'] = self.az
            backup_options['host'] = self.host

            # Remove some values which are not actual fields and some that
            # were set by the API node
            for key in ('name', 'user_id', 'project_id', 'deleted_at',
                        'deleted', 'fail_reason', 'status'):
                backup_options.pop(key, None)

            # Update the database
            backup.update(backup_options)
            backup.save()

            # Update the backup's status
            backup.update({"status": fields.BackupStatus.AVAILABLE})
            backup.save()

            LOG.info('Import record id %s metadata from driver '
                     'finished.', backup.id)
def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
:param backup: The backup object for reset status operation
:param status: The status to be set
:raises InvalidBackup:
:raises AttributeError:
"""
LOG.info('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.',
{'backup_id': backup.id,
'status': status})
LOG.info('Backup service: %s.', backup.service)
if not self._is_our_backup(backup):
err = _('Reset backup status aborted, the backup service'
' currently configured [%(configured_service)s] '
'is not the backup service that was used to create'
' this backup [%(backup_service)s].') % \
{'configured_service': self.driver_name,
'backup_service': backup.service}
raise exception.InvalidBackup(reason=err)
if backup.service is not None:
backup.status = status
backup.save()
# Needs to clean temporary volumes and snapshots.
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(
context, backup)
except Exception:
LOG.exception("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s.",
{'bkup': backup.id})
volume_utils.notify_about_backup_usage(context, backup,
'reset_status.end')
def check_support_to_force_delete(self, context):
"""Check if the backup driver supports force delete operation.
:param context: running context
"""
backup_service = self.service(context)
return backup_service.support_force_delete
def _attach_device(self, ctxt, backup_device,
properties, is_snapshot=False):
"""Attach backup device."""
if not is_snapshot:
return self._attach_volume(ctxt, backup_device, properties)
else:
return self._attach_snapshot(ctxt, backup_device, properties)
def _attach_volume(self, context, volume, properties):
"""Attach a volume."""
try:
conn = self.volume_rpcapi.initialize_connection(context,
volume,
properties)
return self._connect_device(conn)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.volume_rpcapi.terminate_connection(context, volume,
properties,
force=True)
except Exception:
LOG.warning("Failed to terminate the connection "
"of volume %(volume_id)s, but it is "
"acceptable.",
{'volume_id': volume.id})
def _attach_snapshot(self, ctxt, snapshot, properties):
"""Attach a snapshot."""
try:
conn = self.volume_rpcapi.initialize_connection_snapshot(
ctxt, snapshot, properties)
return self._connect_device(conn)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.volume_rpcapi.terminate_connection_snapshot(
ctxt, snapshot, properties, force=True)
except Exception:
LOG.warning("Failed to terminate the connection "
"of snapshot %(snapshot_id)s, but it is "
"acceptable.",
{'snapshot_id': snapshot.id})
def _connect_device(self, conn):
"""Establish connection to device."""
use_multipath = CONF.use_multipath_for_image_xfer
device_scan_attempts = CONF.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = volume_utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn,
expect_raw_disk=True)
vol_handle = connector.connect_volume(conn['data'])
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _detach_device(self, ctxt, attach_info, device,
properties, is_snapshot=False, force=False,
ignore_errors=False):
"""Disconnect the volume or snapshot from the host. """
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'],
force=force, ignore_errors=ignore_errors)
rpcapi = self.volume_rpcapi
if not is_snapshot:
rpcapi.terminate_connection(ctxt, device, properties,
force=force)
rpcapi.remove_export(ctxt, device, sync=True)
else:
rpcapi.terminate_connection_snapshot(ctxt, device,
properties, force=force)
rpcapi.remove_export_snapshot(ctxt, device, sync=True)
def is_working(self):
return self.is_initialized
    @periodic_task.periodic_task(
        spacing=CONF.backup_driver_stats_polling_interval)
    def publish_service_capabilities(self, context):
        """Collect driver status and then publish."""
        # Refresh the local stats snapshot first so the broadcast below
        # carries the current backend state.
        self._report_driver_status(context)
        self._publish_service_capabilities(context)
def _report_driver_status(self, context):
backup_stats = {
'backend_state': self.is_working(),
'driver_name': self.driver_name,
'availability_zone': self.az
}
self.update_service_capabilities(backup_stats)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/backup/rpcapi.py 0000664 0000000 0000000 00000012334 15131732575 0022734 0 ustar 00root root 0000000 0000000 # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume backup RPC API.
"""
from oslo_log import log as logging
from cinder.common import constants
from cinder import rpc
LOG = logging.getLogger(__name__)
class BackupAPI(rpc.RPCAPI):
    """Client side of the volume rpc API.

    API version history:

    .. code-block:: none

        1.0 - Initial version.
        1.1 - Changed methods to accept backup objects instead of IDs.
        1.2 - A version that got in by mistake (without breaking anything).
        1.3 - Dummy version bump to mark start of having cinder-backup service
              decoupled from cinder-volume.

        ... Mitaka supports messaging 1.3. Any changes to existing methods in
        1.x after this point should be done so that they can handle version cap
        set to 1.3.

        2.0 - Remove 1.x compatibility
        2.1 - Adds set_log_levels and get_log_levels
        2.2 - Adds publish_service_capabilities
        2.3 - Adds continue_backup call
        2.4 - Add the volume_is_new flag to the restore_backup method
    """

    RPC_API_VERSION = '2.4'
    RPC_DEFAULT_VERSION = '2.0'
    TOPIC = constants.BACKUP_TOPIC
    BINARY = 'cinder-backup'

    def create_backup(self, ctxt, backup):
        # Fire-and-forget: the backup manager reports progress via the DB.
        LOG.debug("create_backup in rpcapi backup_id %s", backup.id)
        cctxt = self._get_cctxt(server=backup.host)
        cctxt.cast(ctxt, 'create_backup', backup=backup)

    def continue_backup(self, ctxt, backup, backup_device):
        # Callback leg of the two-phase backup flow (see manager).
        LOG.debug("continue_backup in rpcapi backup_id %s", backup.id)
        cctxt = self._get_cctxt(server=backup.host)
        cctxt.cast(ctxt, 'continue_backup', backup=backup,
                   backup_device=backup_device)

    def restore_backup(self, ctxt, backup_host, backup, volume_id,
                       volume_is_new):
        LOG.debug("restore_backup in rpcapi backup_id %s", backup.id)
        cctxt = self._get_cctxt(server=backup_host)
        # volume_is_new was added in 2.3/2.4; fall back to the old
        # signature when the receiving service is version-capped.
        if self.client.can_send_version('2.4'):
            cctxt.cast(ctxt, 'restore_backup', backup=backup,
                       volume_id=volume_id, volume_is_new=volume_is_new)
        else:
            cctxt.cast(ctxt, 'restore_backup', backup=backup,
                       volume_id=volume_id)

    def delete_backup(self, ctxt, backup):
        LOG.debug("delete_backup rpcapi backup_id %s", backup.id)
        cctxt = self._get_cctxt(server=backup.host)
        cctxt.cast(ctxt, 'delete_backup', backup=backup)

    def export_record(self, ctxt, backup) -> dict:
        # Synchronous: the caller needs the exported record back.
        LOG.debug("export_record in rpcapi backup_id %(id)s "
                  "on host %(host)s.",
                  {'id': backup.id,
                   'host': backup.host})
        cctxt = self._get_cctxt(server=backup.host)
        return cctxt.call(ctxt, 'export_record', backup=backup)

    def import_record(self, ctxt, host, backup, backup_service, backup_url,
                      backup_hosts) -> None:
        LOG.debug("import_record rpcapi backup id %(id)s "
                  "on host %(host)s for backup_url %(url)s.",
                  {'id': backup.id, 'host': host, 'url': backup_url})
        cctxt = self._get_cctxt(server=host)
        cctxt.cast(ctxt, 'import_record',
                   backup=backup,
                   backup_service=backup_service,
                   backup_url=backup_url,
                   backup_hosts=backup_hosts)

    def reset_status(self, ctxt, backup, status):
        LOG.debug("reset_status in rpcapi backup_id %(id)s "
                  "on host %(host)s.",
                  {'id': backup.id, 'host': backup.host})
        cctxt = self._get_cctxt(server=backup.host)
        cctxt.cast(ctxt, 'reset_status', backup=backup, status=status)

    def check_support_to_force_delete(self, ctxt, host) -> bool:
        LOG.debug("Check if backup driver supports force delete "
                  "on host %(host)s.", {'host': host})
        cctxt = self._get_cctxt(server=host)
        return cctxt.call(ctxt, 'check_support_to_force_delete')

    @rpc.assert_min_rpc_version('2.1')
    def set_log_levels(self, context, service, log_request):
        cctxt = self._get_cctxt(server=service.host, version='2.1')
        cctxt.cast(context, 'set_log_levels', log_request=log_request)

    @rpc.assert_min_rpc_version('2.1')
    def get_log_levels(self, context, service, log_request):
        cctxt = self._get_cctxt(server=service.host, version='2.1')
        return cctxt.call(context, 'get_log_levels', log_request=log_request)

    @rpc.assert_min_rpc_version('2.2')
    def publish_service_capabilities(self, ctxt):
        # Fanout: every backup service instance refreshes its stats.
        cctxt = self._get_cctxt(version='2.2', fanout=True)
        cctxt.cast(ctxt, 'publish_service_capabilities')
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/brick/ 0000775 0000000 0000000 00000000000 15131732575 0020726 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/brick/README.txt 0000664 0000000 0000000 00000000261 15131732575 0022423 0 ustar 00root root 0000000 0000000 Brick has been migrated to a new standalone
PyPI library called os-brick.
We are leaving the local_dev directory here for the time
being until we can migrate it to a new home.
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/brick/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0023025 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/brick/local_dev/ 0000775 0000000 0000000 00000000000 15131732575 0022656 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/brick/local_dev/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0024755 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/brick/local_dev/lvm.py 0000664 0000000 0000000 00000103304 15131732575 0024027 0 ustar 00root root 0000000 0000000 # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import os
import re
from os_brick import executor
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
import cinder.privsep.lvm
from cinder import utils
LOG = logging.getLogger(__name__)
MINIMUM_LVM_VERSION = (2, 2, 107)
class LVM(executor.Executor):
    """LVM object to enable various LVM related operations."""

    # Base environment prefix for every LVM command; __init__ may extend it
    # with LVM_SYSTEM_DIR / LVM_SUPPRESS_FD_WARNINGS entries.
    LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
    # Class-wide cache for supports_pvs_ignoreskippedcluster();
    # None means "not probed yet".
    _supports_pvs_ignoreskippedcluster = None
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None,
suppress_fd_warn=False):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param root_helper: Execution root_helper method to use
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
:param suppress_fd_warn: Add suppress FD Warn to LVM env
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
if lvm_type not in ['default', 'thin']:
raise exception.Invalid('lvm_type must be "default" or "thin"')
# Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX
# before the first LVM command is executed, and use the directory
# where the specified lvm_conf file is located as the value.
# NOTE(jdg): We use the temp var here because LVM_CMD_PREFIX is a
# class global and if you use append here, you'll literally just keep
# appending values to the global.
_lvm_cmd_prefix = ['env', 'LC_ALL=C']
if lvm_conf and os.path.isfile(lvm_conf):
lvm_sys_dir = os.path.dirname(lvm_conf)
_lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir)
if suppress_fd_warn:
_lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1')
LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix
lvm_version = LVM.get_lvm_version(root_helper)
if LVM.get_lvm_version(root_helper) < MINIMUM_LVM_VERSION:
LOG.warning("LVM version %(current)s is lower than the minimum "
"supported version: %(supported)s",
{'current': lvm_version,
'supported': MINIMUM_LVM_VERSION})
if create_vg and physical_volumes is not None:
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception('Error creating Volume Group')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error('Unable to locate Volume Group %s', vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
try:
self.create_thin_pool(pool_name)
except putils.ProcessExecutionError:
# Maybe we just lost the race against another copy of
# this driver being in init in parallel - e.g.
# cinder-volume and cinder-backup starting in parallel
if self.get_volume(pool_name) is None:
raise
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'name', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
    def _create_vg(self, pv_list):
        """Create the volume group on the given PVs via the privsep helper.

        :param pv_list: List of physical volumes to build the VG on
        """
        cinder.privsep.lvm.create_vg(self.vg_name, pv_list)
    @utils.retry(retry=utils.retry_if_exit_code, retry_param=139, interval=0.5,
                 backoff_rate=0.5)
    def _run_lvm_command(self,
                         cmd_arg_list: list,
                         root_helper: 'str | None' = None,
                         run_as_root: bool = True) -> tuple:
        """Run LVM commands with a retry on code 139 to work around LVM bugs.

        Refer to LP bug 1901783, LP bug 1932188.

        :param cmd_arg_list: full argv for the LVM command
        :param root_helper: rootwrap helper; defaults to self._root_helper
        :param run_as_root: whether to run with elevated privileges
        :returns: (stdout, stderr) tuple from the command
        """
        if not root_helper:
            root_helper = self._root_helper
        (out, err) = self._execute(*cmd_arg_list,
                                   root_helper=root_helper,
                                   run_as_root=run_as_root)
        return (out, err)
    def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
        """Returns available thin pool free space.

        :param vg_name: the vg where the pool is placed
        :param thin_pool_name: the thin pool to gather info for
        :returns: Free space in GB (float), calculated using data_percent;
                  0.0 if the query fails
        """
        cmd = LVM.LVM_CMD_PREFIX + \
            ['lvs', '--noheadings', '--unit=g',
             '-o', 'size,data_percent', '--separator',
             ':', '--nosuffix']
        # NOTE(gfidente): data_percent only applies to some types of LV so we
        # make sure to append the actual thin pool name
        cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))

        free_space = 0.0

        try:
            (out, err) = self._run_lvm_command(cmd)
            if out is not None:
                out = out.strip()
                # Output is "<size>:<data_percent>"; data_percent is the
                # percentage of the pool already consumed.
                data = out.split(':')
                pool_size = float(data[0])
                data_percent = float(data[1])
                consumed_space = pool_size / 100 * data_percent
                free_space = pool_size - consumed_space
                free_space = round(free_space, 2)
        # Need noqa due to a false error about the 'err' variable being unused
        # even though it is used in the logging. Possibly related to
        # https://github.com/PyCQA/pyflakes/issues/378.
        except putils.ProcessExecutionError as err:  # noqa
            LOG.exception('Error querying thin pool about data_percent')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)

        return free_space
@staticmethod
def get_lvm_version(root_helper):
"""Static method to get LVM version from system.
:param root_helper: root_helper to use for execute
:returns: version 3-tuple
"""
cmd = LVM.LVM_CMD_PREFIX + ['lvm', 'version']
(out, _err) = putils.execute(*cmd)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
# NOTE(gfidente): version is formatted as follows:
# major.minor.patchlevel(library API version)[-customisation]
version = version_list[2]
version_filter = r"(\d+)\.(\d+)\.(\d+).*"
r = re.search(version_filter, version)
version_tuple = tuple(map(int, r.group(1, 2, 3)))
return version_tuple
@staticmethod
def supports_thin_provisioning(root_helper):
"""Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
"""
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
"""Property indicating whether snap activation changes are supported.
Check for LVM version >= 2.02.91.
(LVM2 git: e8a40f6 Allow to activate snapshot)
:returns: True/False indicating support
"""
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
"""Property indicating whether lvchange can ignore skip activation.
Check for LVM version >= 2.02.99.
(LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
"""
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def supports_pvs_ignoreskippedcluster(root_helper):
"""Property indicating whether pvs supports --ignoreskippedcluster
Check for LVM version >= 2.02.103.
(LVM2 git: baf95bbff cmdline: Add --ignoreskippedcluster.
"""
if LVM._supports_pvs_ignoreskippedcluster is not None:
return LVM._supports_pvs_ignoreskippedcluster
LVM._supports_pvs_ignoreskippedcluster = (
LVM.get_lvm_version(root_helper) >= (2, 2, 103))
return LVM._supports_pvs_ignoreskippedcluster
    @staticmethod
    @utils.retry(retry=utils.retry_if_exit_code, retry_param=139, interval=0.5,
                 backoff_rate=0.5)  # Bug#1901783
    def get_lv_info(root_helper, vg_name=None, lv_name=None):
        """Retrieve info about LVs (all, in a VG, or a single LV).

        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :param lv_name: optional, gathers info for only the specified LV
        :returns: List of Dictionaries with LV info
        """
        cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
                                    '-o', 'vg_name,name,size', '--nosuffix',
                                    '--readonly']
        if lv_name is not None and vg_name is not None:
            # "vg/lv" restricts output to a single logical volume.
            cmd.append("%s/%s" % (vg_name, lv_name))
        elif vg_name is not None:
            cmd.append(vg_name)

        try:
            (out, _err) = putils.execute(*cmd,
                                         root_helper=root_helper,
                                         run_as_root=True)
        except putils.ProcessExecutionError as err:
            with excutils.save_and_reraise_exception(reraise=True) as ctx:
                # A missing LV is not an error for this query: swallow
                # the failure and return an empty result instead.
                if "not found" in err.stderr or "Failed to find" in err.stderr:
                    ctx.reraise = False
                    LOG.info("Logical Volume not found when querying "
                             "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s",
                             {'vg': vg_name, 'lv': lv_name})
                    out = None

        lv_list = []
        if out is not None:
            volumes = out.split()
            # Output is a whitespace-separated stream of (vg, name, size)
            # triples; regroup it three tokens at a time.
            iterator = zip(*[iter(volumes)] * 3)  # pylint: disable=E1101
            for vg, name, size in iterator:
                lv_list.append({"vg": vg, "name": name, "size": size})

        return lv_list
def get_volumes(self, lv_name=None):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
return self.get_lv_info(self._root_helper,
self.vg_name,
lv_name)
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes(name)
for r in ref_list:
if r['name'] == name:
return r
return None
@staticmethod
def get_all_physical_volumes(root_helper, vg_name=None):
"""Static method to get all PVs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with PV info
"""
field_sep = '|'
cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', field_sep,
'--nosuffix']
if LVM.supports_pvs_ignoreskippedcluster(root_helper):
cmd.append('--ignoreskippedcluster')
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
pvs = out.split()
if vg_name is not None:
pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]
pv_list = []
for pv in pvs:
fields = pv.split(field_sep)
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': float(fields[2]),
'available': float(fields[3])})
return pv_list
@staticmethod
def get_all_volume_groups(root_helper, vg_name=None):
"""Static method to get all VGs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with VG info
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':',
'--nosuffix']
if vg_name is not None:
cmd.append(vg_name)
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': float(fields[1]),
'available': float(fields[2]),
'lv_count': int(fields[3]),
'uuid': fields[4]})
return vg_list
    def update_volume_group_info(self):
        """Update VG info for this instantiation.

        Refreshes the member fields (vg_size, vg_free_space, vg_lv_count,
        vg_uuid, thin pool stats and vg_provisioned_capacity) from the
        current state of the system.  Returns nothing.

        :raises exception.VolumeGroupNotFound: if the VG cannot be queried
        """
        vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)

        if len(vg_list) != 1:
            LOG.error('Unable to find VG: %s', self.vg_name)
            raise exception.VolumeGroupNotFound(vg_name=self.vg_name)

        self.vg_size = float(vg_list[0]['size'])
        self.vg_free_space = float(vg_list[0]['available'])
        self.vg_lv_count = int(vg_list[0]['lv_count'])
        self.vg_uuid = vg_list[0]['uuid']

        total_vols_size = 0.0
        if self.vg_thin_pool is not None:
            # NOTE(xyang): If providing only self.vg_name, get_lv_info
            # returns info on the thin pool AND all individual volumes, e.g.:
            #   stack-vg stack-pool                                   9.51
            #   stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1  1.00
            # If providing both self.vg_name and self.vg_thin_pool,
            # get_lv_info returns ONLY info on the thin pool.
            # We need info on both the thin pool and the volumes,
            # therefore we provide only self.vg_name here.
            for lv in self.get_lv_info(self._root_helper,
                                       self.vg_name):
                lvsize = lv['size']
                # get_lv_info runs "lvs" command with "--nosuffix".
                # This removes "g" from "1.00g" and only outputs "1.00".
                # Running "lvs" command without "--nosuffix" will output
                # "1.00g" if "g" is the unit.
                # Remove the unit if it is in lv['size'].
                if not lv['size'][-1].isdigit():
                    lvsize = lvsize[:-1]
                if lv['name'] == self.vg_thin_pool:
                    self.vg_thin_pool_size = float(lvsize)
                    tpfs = self._get_thin_pool_free_space(self.vg_name,
                                                          self.vg_thin_pool)
                    self.vg_thin_pool_free_space = tpfs
                else:
                    total_vols_size = total_vols_size + float(lvsize)
            total_vols_size = round(total_vols_size, 2)

        self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
Ideally we would use 100% of the containing volume group and be done.
But the 100%VG notation to lvcreate is not implemented and thus cannot
be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
Further, some amount of free space must remain in the volume group for
metadata for the contained logical volumes. The exact amount depends
on how much volume sharing you expect.
:returns: An lvcreate-ready string for the number of calculated bytes.
"""
# make sure volume group information is current
self.update_volume_group_info()
# leave 5% free for metadata
return "%sg" % (self.vg_free_space * 0.95)
def create_thin_pool(self, name=None, size_str=None):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "-pool"
:param size_str: Size to allocate for pool, default is entire VG
:returns: The size string passed to the lvcreate command
"""
if not self.supports_thin_provisioning(self._root_helper):
LOG.error('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.')
return None
if name is None:
name = '%s-pool' % self.vg_name
vg_pool_name = '%s/%s' % (self.vg_name, name)
if not size_str:
size_str = self._calculate_thin_pool_size()
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str,
vg_pool_name]
LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of "
"total %(free)sg", {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
self._run_lvm_command(cmd)
self.vg_thin_pool = name
return size_str
    def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
        """Creates a logical volume on the object's VG.

        :param name: Name to use when creating Logical Volume
        :param size_str: Size to use when creating Logical Volume; assumed
                         to end in a unit suffix (e.g. '10g')
        :param lv_type: Type of Volume (default or thin)
        :param mirror_count: Use LVM mirroring with specified count
        :raises putils.ProcessExecutionError: if lvcreate fails
        """
        if lv_type == 'thin':
            pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
            cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n',
                                        name, pool_path]
        else:
            cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name,
                                        '-L', size_str]

        if mirror_count > 0:
            cmd.extend(['--type=mirror', '-m', mirror_count, '--nosync',
                        '--mirrorlog', 'mirrored'])
            # Convert the numeric part of size_str to terabytes to decide
            # whether a larger mirror region size is needed.
            terras = int(size_str[:-1]) / 1024.0
            if terras >= 1.5:
                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
                # NOTE(vish): Next power of two for region size. See:
                # http://red.ht/U2BPOD
                cmd.extend(['-R', str(rsize)])

        try:
            self._run_lvm_command(cmd)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error creating Volume')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            LOG.error('Current state: %s',
                      self.get_all_volume_groups(self._root_helper))
            raise
@utils.retry(putils.ProcessExecutionError)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error("Trying to create snapshot by non-existent LV: %s",
source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
'%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd.extend(['-L', '%sg' % (size)])
try:
self._run_lvm_command(cmd)
except putils.ProcessExecutionError as err:
LOG.exception('Error creating snapshot')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise
def _mangle_lv_name(self, name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not name.startswith('snapshot'):
return name
return '_' + name
def _lv_is_active(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._run_lvm_command(cmd)
if out:
out = out.strip()
if (out[4] == 'a'):
return True
return False
    @utils.retry(exception.VolumeNotDeactivated, retries=1, interval=2)
    def deactivate_lv(self, name):
        """Deactivate an LV and wait until LVM reports it inactive.

        :param name: Name of LV to deactivate
        :raises exception.VolumeNotDeactivated: if the LV stays active
        :raises putils.ProcessExecutionError: if lvchange fails
        """
        lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
        cmd = ['lvchange', '-a', 'n']
        cmd.append(lv_path)
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error deactivating LV, retry may be possible')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise

        # Wait until lv is deactivated to return in
        # order to prevent a race condition.
        self._wait_for_volume_deactivation(name)
@utils.retry(retry_param=exception.VolumeNotDeactivated, retries=5,
backoff_rate=2)
def _wait_for_volume_deactivation(self, name):
LOG.debug("Checking to see if volume %s has been deactivated.",
name)
if self._lv_is_active(name):
LOG.debug("Volume %s is still active.", name)
raise exception.VolumeNotDeactivated(name=name)
else:
LOG.debug("Volume %s has been deactivated.", name)
    @utils.retry(putils.ProcessExecutionError, retries=5, backoff_rate=2)
    def activate_lv(self, name, is_snapshot=False, permanent=False):
        """Ensure that logical volume/snapshot logical volume is activated.

        :param name: Name of LV to activate
        :param is_snapshot: whether LV is a snapshot
        :param permanent: whether we should drop skipactivation flag
        :raises putils.ProcessExecutionError:
        """

        # This is a no-op if requested for a snapshot on a version
        # of LVM that doesn't support snapshot activation.
        # (Assume snapshot LV is always active.)
        if is_snapshot and not self.supports_snapshot_lv_activation:
            return

        lv_path = self.vg_name + '/' + self._mangle_lv_name(name)

        # Must pass --yes to activate both the snap LV and its origin LV.
        # Otherwise lvchange asks if you would like to do this interactively,
        # and fails.
        cmd = ['lvchange', '-a', 'y', '--yes']

        if self.supports_lvchange_ignoreskipactivation:
            # If permanent=True is specified, drop the skipactivation flag in
            # order to make this LV automatically activated after next reboot.
            if permanent:
                cmd += ['-k', 'n']
            else:
                cmd.append('-K')

        cmd.append(lv_path)

        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error activating LV')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
    @utils.retry(putils.ProcessExecutionError)
    def delete(self, name):
        """Delete logical volume or snapshot.

        On failure, runs udevadm settle and retries once with suspended
        devices ignored before letting the decorator retry the whole call.

        :param name: Name of LV to delete
        """

        def run_udevadm_settle():
            # Let udev finish processing queued events before retrying.
            cinder.privsep.lvm.udevadm_settle()

        # LV removal seems to be a race with other writers or udev in
        # some cases (see LP #1270192), so we enable retry deactivation
        LVM_CONFIG = 'activation { retry_deactivation = 1} '

        try:
            self._execute(
                'lvremove',
                '--config', LVM_CONFIG,
                '-f',
                '%s/%s' % (self.vg_name, name),
                root_helper=self._root_helper, run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.debug('Error reported running lvremove: CMD: %(command)s, '
                      'RESPONSE: %(response)s',
                      {'command': err.cmd, 'response': err.stderr})

            LOG.debug('Attempting udev settle and retry of lvremove...')
            run_udevadm_settle()

            # The previous failing lvremove -f might leave behind
            # suspended devices; when lvmetad is not available, any
            # further lvm command will block forever.
            # Therefore we need to skip suspended devices on retry.
            LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'

            self._execute(
                'lvremove',
                '--config', LVM_CONFIG,
                '-f',
                '%s/%s' % (self.vg_name, name),
                root_helper=self._root_helper, run_as_root=True)
            LOG.debug('Successfully deleted volume: %s after '
                      'udev settle.', name)
    def revert(self, snapshot_name):
        """Revert an LV to snapshot.

        :param snapshot_name: Name of snapshot to revert
        :raises putils.ProcessExecutionError: if lvconvert fails
        """
        try:
            cinder.privsep.lvm.lvconvert(self.vg_name, snapshot_name)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error Revert Volume')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
def lv_has_snapshot(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '--readonly',
'%s/%s' % (self.vg_name, name)]
out, _err = self._run_lvm_command(cmd)
if out:
out = out.strip()
if (out[0] == 'o') or (out[0] == 'O'):
return True
return False
def lv_is_snapshot(self, name):
"""Return True if LV is a snapshot, False otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._run_lvm_command(cmd)
out = out.strip()
if out:
if (out[0] == 's'):
return True
return False
def lv_is_open(self, name):
"""Return True if LV is currently open, False otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._run_lvm_command(cmd)
out = out.strip()
if out:
if (out[5] == 'o'):
return True
return False
def lv_get_origin(self, name):
"""Return the origin of an LV that is a snapshot, None otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Origin', '%s/%s' % (self.vg_name, name)]
out, _err = self._run_lvm_command(cmd)
out = out.strip()
if out:
return out
return None
    def extend_volume(self, lv_name, new_size):
        """Extend the size of an existing volume.

        :param lv_name: Name of LV to extend
        :param new_size: New size string for lvextend's -L option
        :raises putils.ProcessExecutionError: if lvextend fails
        """
        # Volumes with snaps have attributes 'o' or 'O' and will be
        # deactivated, but Thin Volumes with snaps have attribute 'V'
        # and won't be deactivated because the lv_has_snapshot method looks
        # for 'o' or 'O'
        has_snapshot = self.lv_has_snapshot(lv_name)
        if has_snapshot:
            self.deactivate_lv(lv_name)
        try:
            cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size,
                                        '%s/%s' % (self.vg_name, lv_name)]
            self._run_lvm_command(cmd)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error extending Volume')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
        # Re-activate only if we deactivated it above.
        if has_snapshot:
            self.activate_lv(lv_name)
    def vg_mirror_free_space(self, mirror_count):
        """Return capacity (in GB) usable for LVs mirrored mirror_count times.

        Greedy simulation: repeatedly consume the smallest PV as payload
        and charge matching extents to the mirror_count largest PVs, until
        too few PVs remain to host all mirror legs.
        """
        free_capacity = 0.0

        disks = []
        for pv in self.pv_list:
            disks.append(float(pv['available']))

        while True:
            # Drop exhausted PVs and sort largest-first each round.
            disks = sorted([a for a in disks if a > 0.0], reverse=True)
            if len(disks) <= mirror_count:
                break
            # consume the smallest disk
            disk = disks[-1]
            disks = disks[:-1]
            # match extents for each mirror on the largest disks
            for index in list(range(mirror_count)):
                disks[index] -= disk
            free_capacity += disk

        return free_capacity
def vg_mirror_size(self, mirror_count):
return (self.vg_free_space / (mirror_count + 1))
    def rename_volume(self, lv_name, new_name):
        """Change the name of an existing volume.

        :param lv_name: current LV name
        :param new_name: new LV name
        :raises putils.ProcessExecutionError: if lvrename fails
        """
        try:
            cinder.privsep.lvm.lvrename(self.vg_name, lv_name, new_name)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error renaming logical volume')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/ 0000775 0000000 0000000 00000000000 15131732575 0020377 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0022476 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/api.py 0000664 0000000 0000000 00000004423 15131732575 0021525 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Cinder OS API."""
import logging as python_logging
import sys
import eventlet  # noqa
# Patch the stdlib for green threads before anything else imports
# threading/socket/etc.
eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading  # pylint: disable=E0401
import threading  # noqa
orig_threading.current_thread.__globals__['_active'] = \
    threading._active  # type: ignore
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from cinder import i18n # noqa
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config
from cinder import coordination
from cinder import objects
from cinder import rpc
from cinder import service
from cinder import utils
from cinder import version
CONF = cfg.CONF
def main() -> None:
    """Entry point for the cinder-api (osapi_volume) WSGI service.

    Order matters here: objects, config and logging must be initialized
    before the coordinator, RPC layer and WSGI workers are started.
    """
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    config.set_middleware_defaults()
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    coordination.COORDINATOR.start()
    rpc.init(CONF)
    launcher = service.process_launcher()
    server = service.WSGIService('osapi_volume')
    # Fork one worker per configured server.workers; wait() blocks until
    # all children exit.
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/backup.py 0000664 0000000 0000000 00000012603 15131732575 0022220 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Cinder Volume Backup."""
import logging as python_logging
import shlex
import sys
# NOTE: Monkey patching must go before OSLO.log import, otherwise OSLO.context
# will not use greenthread thread local and all greenthreads will share the
# same context. It's also a good idea to monkey patch everything before
# loading multiprocessing
import eventlet
eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading # pylint: disable=E0401
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = \
threading._active # type: ignore
import typing
from typing import Union
import os_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_privsep import priv_context
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
# Need to register global_opts
from cinder.common import config # noqa
from cinder.db import api as session
from cinder import i18n
i18n.enable_lazy()
from cinder import objects
from cinder import service
from cinder import utils
from cinder import version
if typing.TYPE_CHECKING:
import oslo_service
CONF = cfg.CONF

# Options specific to the cinder-backup command itself (worker/process
# tuning); registered globally so they are parsed from the CLI/config file.
backup_cmd_opts = [
    cfg.IntOpt('backup_workers',
               default=1, min=1, max=processutils.get_worker_count(),
               sample_default=8,
               help='Number of backup processes to launch. '
                    'Improves performance with concurrent backups.'),
    cfg.IntOpt('backup_max_operations',
               default=15,
               min=0,
               help='Maximum number of concurrent memory heavy operations: '
                    'backup and restore. Value of 0 means unlimited'),
]
CONF.register_opts(backup_cmd_opts)

# Initialized in main() once logging has been configured; helpers must not
# log before then.
LOG = None

# NOTE: The default backup driver uses swift and performs read/write
# operations in a thread. swiftclient will log requests and responses at DEBUG
# level, which can cause a thread switch and break the backup operation. So we
# set a default log level of WARN for swiftclient and boto to try and avoid
# this issue.
_EXTRA_DEFAULT_LOG_LEVELS = ['swiftclient=WARN', 'botocore=WARN']
def _launch_backup_process(launcher: 'oslo_service.ProcessLauncher',
                           num_process: int,
                           _semaphore: Union[eventlet.semaphore.Semaphore,
                                             utils.Semaphore]) -> None:
    """Create one cinder-backup service and hand it to the process launcher.

    :param launcher: oslo.service process launcher that forks/runs the
        service.
    :param num_process: zero-based worker index; the service is created
        with ``process_number=num_process + 1``.
    :param _semaphore: semaphore shared across workers that limits
        concurrent memory-heavy backup/restore operations.
    :raises SystemExit: exits with status 1 if the service cannot be
        created.
    """
    try:
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        service_name='backup',
                                        process_number=num_process + 1,
                                        semaphore=_semaphore)
    except Exception:
        # LOG is assigned in main() before this helper is ever called.
        assert LOG is not None
        LOG.exception('Backup service %s failed to start.', CONF.host)
        sys.exit(1)
    else:
        # Dispose of the whole DB connection pool here before
        # starting another process.  Otherwise we run into cases where
        # child processes share DB connections which results in errors.
        session.dispose_engine()
        launcher.launch_service(server)
def main() -> None:
    """Entry point for the cinder-backup service.

    Parses configuration, sets up logging/privsep/GMR, then launches
    either multiple backup worker processes (backup_workers > 1) or a
    single in-process service.
    """
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    # Quiet down chatty client libraries (see _EXTRA_DEFAULT_LOG_LEVELS).
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    os_brick.setup(CONF)
    # LOG is deliberately created only after logging has been configured.
    global LOG
    LOG = logging.getLogger(__name__)
    # Semaphore bounding concurrent memory-heavy operations across workers.
    semaphore = utils.semaphore_factory(CONF.backup_max_operations,
                                        CONF.backup_workers)
    if CONF.backup_workers > 1:
        LOG.info('Backup running with %s processes.', CONF.backup_workers)
        launcher = service.get_launcher()
        for i in range(CONF.backup_workers):
            _launch_backup_process(launcher, i, semaphore)
        launcher.wait()
    else:
        LOG.info('Backup running in single process mode.')
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        service_name='backup',
                                        process_number=1,
                                        semaphore=semaphore)
        service.serve(server)
        service.wait()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/manage.py 0000664 0000000 0000000 00000127262 15131732575 0022213 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""CLI interface for cinder management."""
import collections
import collections.abc as collections_abc
import errno
import glob
import itertools
import logging as python_logging
import os
import re
import sys
import time
import typing
from typing import Any, Callable, Optional, Tuple, Union
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import timeutils
import tabulate
# Need to register global_opts
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import config # noqa
from cinder import context
from cinder import db
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base as ovo_base
from cinder import quota
from cinder import rpc
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Latest RPC API version for each service binary; used by
# "cinder-manage db sync --bump-versions" to pin services after an
# offline upgrade.
RPC_VERSIONS = {
    'cinder-scheduler': scheduler_rpcapi.SchedulerAPI.RPC_API_VERSION,
    'cinder-volume': volume_rpcapi.VolumeAPI.RPC_API_VERSION,
    'cinder-backup': backup_rpcapi.BackupAPI.RPC_API_VERSION,
}

# Current version of the versioned-objects (OVO) hierarchy.
OVO_VERSION = ovo_base.OBJ_VERSIONS.get_current()
# Decorators for actions
@typing.no_type_check
def args(*args, **kwargs):
    """Decorator that attaches an argparse argument spec to a command method.

    Specs accumulate on ``func.args`` (outermost decorator first).
    Positional names containing dashes are normalized to underscores for
    the argparse dest, while the dashed spelling is kept as the metavar.
    """
    arg_list = list(args)
    first = arg_list[0]
    # Only positional arguments (no leading '-') need the dash fix-up:
    # argparse derives the dest from the name and '-' is not valid there.
    if '-' in first and not first.startswith('-'):
        kwargs.setdefault('metavar', first)
        arg_list[0] = first.replace('-', '_')

    def _decorator(func):
        specs = func.__dict__.setdefault('args', [])
        specs.insert(0, (arg_list, kwargs))
        return func

    return _decorator
class HostCommands(object):
    """List hosts."""

    @args('zone', nargs='?', default=None,
          help='Availability Zone (default: %(default)s)')
    def list(self, zone: Optional[str] = None) -> None:
        """Show a list of all physical hosts.

        Can be filtered by zone.
        args: [zone]
        """
        print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'})
        ctxt = context.get_admin_context()
        services = objects.ServiceList.get_all(ctxt)
        if zone:
            services = [svc for svc in services
                        if svc.availability_zone == zone]
        # Print each host once, keeping the first occurrence's zone and
        # the original service ordering.
        seen = set()
        for svc in services:
            if svc['host'] in seen:
                continue
            seen.add(svc['host'])
            print(_("%(host)-25s\t%(availability_zone)-15s")
                  % {'host': svc['host'],
                     'availability_zone': svc['availability_zone']})
class DbCommands(object):
    """Class for managing the database."""

    # NOTE: Online migrations cannot depend on having Cinder services running.
    # Migrations can be called during Fast-Forward Upgrades without having any
    # Cinder services up.
    # NOTE: Online migrations must be removed at the beginning of the next
    # release to the one they've been introduced.  A comment with the release
    # a migration is introduced and the one where it must be removed must
    # preceed any element of the "online_migrations" tuple, like this:
    #    # Added in Queens remove in Rocky
    #    db.service_uuids_online_data_migration,
    online_migrations: Tuple[Callable[[context.RequestContext, int],
                                      Tuple[int, int]], ...] = (
        # TODO: (D Release) Remove next line and this comment
        db.remove_temporary_admin_metadata_data_migration,
    )

    def __init__(self):
        pass

    @args('version', nargs='?', default=None, type=int,
          help='Database version')
    @args('--bump-versions', dest='bump_versions', default=False,
          action='store_true',
          help='Update RPC and Objects versions when doing offline upgrades, '
               'with this we no longer need to restart the services twice '
               'after the upgrade to prevent ServiceTooOld exceptions.')
    def sync(self,
             version: Optional[int] = None,
             bump_versions: bool = False) -> None:
        """Sync the database up to the most recent version.

        Exits 1 on migration failure and 2 on version-bump failure, so the
        two error classes can be told apart by callers.
        """
        if version is not None and version > db.MAX_INT:
            print(_('Version should be less than or equal to '
                    '%(max_version)d.') % {'max_version': db.MAX_INT})
            sys.exit(1)
        try:
            db_migration.db_sync(version)
        except db_exc.DBMigrationError as ex:
            print("Error during database migration: %s" % ex)
            sys.exit(1)
        try:
            if bump_versions:
                # Pin every service record to the latest RPC/OVO versions so
                # services don't need a second restart after the upgrade.
                ctxt = context.get_admin_context()
                services = objects.ServiceList.get_all(ctxt)
                for service in services:
                    rpc_version = RPC_VERSIONS[service.binary]
                    if (service.rpc_current_version != rpc_version or
                            service.object_current_version != OVO_VERSION):
                        service.rpc_current_version = rpc_version
                        service.object_current_version = OVO_VERSION
                        service.save()
        except Exception as ex:
            print(_('Error during service version bump: %s') % ex)
            sys.exit(2)

    def version(self) -> None:
        """Print the current database version."""
        print(db_migration.db_version())

    @args('age_in_days', type=int,
          help='Purge deleted rows older than age in days')
    def purge(self, age_in_days: int) -> None:
        """Purge deleted rows older than a given age from cinder tables."""
        age_in_days = int(age_in_days)
        if age_in_days < 0:
            print(_("Must supply a positive value for age"))
            sys.exit(1)
        # An age reaching back before the epoch cannot be valid.
        if age_in_days >= (int(time.time()) / 86400):
            print(_("Maximum age is count of days since epoch."))
            sys.exit(1)
        ctxt = context.get_admin_context()
        try:
            db.purge_deleted_rows(ctxt, age_in_days)
        except db_exc.DBReferenceError:
            print(_("Purge command failed, check cinder-manage "
                    "logs for more details."))
            sys.exit(1)

    def _run_migration(self,
                       ctxt: context.RequestContext,
                       max_count: int) -> Tuple[dict, bool]:
        """Run one pass over all online migrations, up to max_count rows.

        :returns: ({migration name: (found, done)}, whether any migration
            raised an exception)
        """
        ran = 0
        exceptions = False
        migrations = {}
        for migration_meth in self.online_migrations:
            # Remaining budget for this pass.
            count = max_count - ran
            try:
                found, done = migration_meth(ctxt, count)
            except Exception:
                msg = (_("Error attempting to run %(method)s") %
                       {'method': migration_meth.__name__})
                print(msg)
                LOG.exception(msg)
                exceptions = True
                # Record zero progress for the failed migration.
                found = done = 0
            name = migration_meth.__name__
            if found:
                print(_('%(found)i rows matched query %(meth)s, %(done)i '
                        'migrated') % {'found': found,
                                       'meth': name,
                                       'done': done})
            migrations[name] = found, done
            if max_count is not None:
                ran += done
                if ran >= max_count:
                    break
        return migrations, exceptions

    @args('--max_count', metavar='', dest='max_count', type=int,
          help='Maximum number of objects to consider.')
    def online_data_migrations(self, max_count: Optional[int] = None) -> None:
        """Perform online data migrations for the release in batches.

        Without --max_count, runs batches of 50 until nothing is left to
        migrate.  Exit codes: 0 = fully migrated, 1 = work remains,
        2 = failures that need intervention, 127 = bad max_count.
        """
        ctxt = context.get_admin_context()
        if max_count is not None:
            unlimited = False
            if max_count < 1:
                print(_('Must supply a positive value for max_count.'))
                sys.exit(127)
        else:
            unlimited = True
            max_count = 50
            print(_('Running batches of %i until complete.') % max_count)
        ran = None
        exceptions = False
        migration_info: dict[str, Any] = {}
        while ran is None or ran != 0:
            migrations, exceptions = self._run_migration(ctxt, max_count)
            ran = 0
            for name in migrations:
                migration_info.setdefault(name, (0, 0))
                # Track the highest "found" and the cumulative "done".
                migration_info[name] = (
                    max(migration_info[name][0], migrations[name][0]),
                    migration_info[name][1] + migrations[name][1],
                )
                ran += migrations[name][1]
            if not unlimited:
                break
        headers = ["{}".format(_('Migration')),
                   "{}".format(_('Total Needed')),
                   "{}".format(_('Completed')), ]
        rows = []
        for name in sorted(migration_info.keys()):
            info = migration_info[name]
            rows.append([name, info[0], info[1]])
        print(tabulate.tabulate(rows, headers=headers, tablefmt='psql'))
        # NOTE(imacdonn): In the "unlimited" case, the loop above will only
        # terminate when all possible migrations have been effected. If we're
        # still getting exceptions, there's a problem that requires
        # intervention. In the max-count case, exceptions are only considered
        # fatal if no work was done by any other migrations ("not ran"),
        # because otherwise work may still remain to be done, and that work
        # may resolve dependencies for the failing migrations.
        if exceptions and (unlimited or not ran):
            print(_("Some migrations failed unexpectedly. Check log for "
                    "details."))
            sys.exit(2)
        sys.exit(1 if ran else 0)

    @args('--enable-replication', action='store_true', default=False,
          help='Set replication status to enabled (default: %(default)s).')
    @args('--active-backend-id', default=None,
          help='Change the active backend ID (default: %(default)s).')
    @args('--backend-host', required=True,
          help='The backend host name.')
    def reset_active_backend(self,
                             enable_replication: bool,
                             active_backend_id: Optional[str],
                             backend_host: str) -> None:
        """Reset the active backend for a host."""
        ctxt = context.get_admin_context()
        try:
            db.reset_active_backend(ctxt, enable_replication,
                                    active_backend_id, backend_host)
        except db_exc.DBReferenceError:
            print(_("Failed to reset active backend for host %s, "
                    "check cinder-manage logs for more details.") %
                  backend_host)
            sys.exit(1)
class QuotaCommands(object):
    """Class for managing quota issues."""

    def __init__(self):
        pass

    @args('--project-id', default=None,
          help=('The ID of the project where we want to check the quotas '
                '(defaults to all projects).'))
    def check(self, project_id: Optional[str]) -> None:
        """Check if quotas and reservations are correct

        This action checks quotas and reservations, for a specific project or
        for all projects, to see if they are out of sync.
        The check will also look for duplicated entries.

        One way to use this check in combination with the sync action is to
        run the check for all projects, take note of those that are out of
        sync, and then sync them one by one at intervals to reduce stress on
        the DB.
        """
        result = self._check_sync(project_id, do_fix=False)
        # Exit code 1 lets scripts detect that something was out of sync.
        if result:
            sys.exit(1)

    @args('--project-id', default=None,
          help=('The ID of the project where we want to sync the quotas '
                '(defaults to all projects).'))
    def sync(self, project_id: Optional[str]) -> None:
        """Fix quotas and reservations

        This action refreshes existing quota usage and reservation count for a
        specific project or for all projects.
        The refresh will also remove duplicated entries.

        This operation is best executed when Cinder is not running, but it can
        be run with cinder services running as well.
        A different transaction is used for each project's quota sync, so an
        action failure will only rollback the current project's changes.
        """
        self._check_sync(project_id, do_fix=True)

    @db_api.main_context_manager.reader
    def _get_quota_projects(self,
                            ctxt: context.RequestContext,
                            project_id: Optional[str]) -> list[str]:
        """Get project ids that have quota_usage entries.

        When project_id is given, returns [project_id] if it has quota
        usage rows, otherwise prints a message and returns [].
        """
        if project_id:
            model = models.QuotaUsage
            # If the project does not exist
            if not ctxt.session.query(
                db_api.sql.exists()
                .where(
                    db_api.and_(
                        model.project_id == project_id,
                        ~model.deleted,
                    ),
                )
            ).scalar():
                print(
                    'Project id %s has no quota usage. Nothing to do.' %
                    project_id,
                )
                return []
            return [project_id]
        projects = db_api.get_projects(ctxt,
                                       models.QuotaUsage,
                                       read_deleted="no")
        project_ids = [row.project_id for row in projects]
        return project_ids

    def _get_usages(self,
                    ctxt: context.RequestContext,
                    resources,
                    project_id: str) -> list:
        """Get data necessary to check out of sync quota usage.

        Returns a list of QuotaUsage instances for the specific project,
        locked with SELECT ... FOR UPDATE.
        """
        usages = db_api.model_query(
            ctxt,
            db_api.models.QuotaUsage,
            read_deleted="no",
        ).filter_by(project_id=project_id).with_for_update().all()
        return usages

    def _get_reservations(self,
                          ctxt: context.RequestContext,
                          project_id: str,
                          usage_id: str) -> list:
        """Get reservations for a given project and usage id."""
        reservations = (
            db_api.model_query(
                ctxt,
                models.Reservation,
                read_deleted="no",
            )
            .filter_by(project_id=project_id, usage_id=usage_id)
            .with_for_update()
            .all()
        )
        return reservations

    def _check_duplicates(self,
                          ctxt: context.RequestContext,
                          usages,
                          do_fix: bool) -> tuple[list, bool]:
        """Look for duplicated quota used entries (bug#1484343)

        If we have duplicates and we are fixing them, then we reassign the
        reservations of the usage we are removing.

        :returns: (deduplicated usages, whether duplicates were found)
        """
        resources = collections.defaultdict(list)
        for usage in usages:
            resources[usage.resource].append(usage)
        duplicates_found = False
        result = []
        for resource_usages in resources.values():
            # Keep the first usage row; fold the rest into it.
            keep_usage = resource_usages[0]
            if len(resource_usages) > 1:
                duplicates_found = True
                print('\t%s: %s duplicated usage entries - ' %
                      (keep_usage.resource, len(resource_usages) - 1),
                      end='')
                if do_fix:
                    # Each of the duplicates can have reservations
                    reassigned = 0
                    for usage in resource_usages[1:]:
                        reservations = self._get_reservations(
                            ctxt,
                            usage.project_id,
                            usage.id,
                        )
                        reassigned += len(reservations)
                        for reservation in reservations:
                            reservation.usage_id = keep_usage.id
                        keep_usage.in_use += usage.in_use
                        keep_usage.reserved += usage.reserved
                        usage.delete(ctxt.session)
                    print('duplicates removed & %s reservations reassigned' %
                          reassigned)
                else:
                    print('ignored')
            result.append(keep_usage)
        return result, duplicates_found

    def _check_sync(self, project_id: Optional[str], do_fix: bool) -> bool:
        """Check the quotas and reservations optionally fixing them.

        :returns: True if any project had a discrepancy.
        """
        ctxt = context.get_admin_context()
        # Get the quota usage types and their sync methods.  Work on a copy
        # so we don't inject the group quotas into the global QUOTAS
        # engine's resource registry as a side effect.
        resources = dict(quota.QUOTAS.resources)
        resources.update(quota.GROUP_QUOTAS.resources)
        # Get all project ids that have quota usage. Method doesn't lock
        # projects, since newly added projects should not be out of sync and
        # projects removed will just turn nothing on the quota usage.
        projects = self._get_quota_projects(ctxt, project_id)
        discrepancy = False
        for project in projects:
            # NOTE: must accumulate with |=.  The previous &= was a bug:
            # discrepancy starts False, and False & x is always False, so
            # out-of-sync projects were never reported and "check" could
            # never exit with status 1.
            discrepancy |= self._check_project_sync(
                ctxt,
                project,
                do_fix,
                resources,
            )
        print('Action successfully completed')
        return discrepancy

    @db_api.main_context_manager.writer
    def _check_project_sync(self,
                            ctxt: context.RequestContext,
                            project: str,
                            do_fix: bool,
                            resources) -> bool:
        """Check (and optionally fix) one project inside a DB transaction.

        :returns: True if any duplicate, usage, or reservation discrepancy
            was found for this project.
        """
        print('Processing quota usage for project %s' % project)
        discrepancy = False
        action_msg = ' - fixed' if do_fix else ''
        # NOTE: It's important to always get the quota first and then the
        # reservations to prevent deadlocks with quota commit and rollback
        # from running Cinder services.
        # We only want to sync existing quota usage rows
        usages = self._get_usages(ctxt, resources, project)
        # Check for duplicated entries (bug#1484343)
        usages, duplicates_found = self._check_duplicates(
            ctxt, usages, do_fix,
        )
        if duplicates_found:
            discrepancy = True
        # Check quota and reservations
        for usage in usages:
            resource_name = usage.resource
            # Get the correct value for this quota usage resource
            updates = db_api._get_sync_updates(
                ctxt,
                project,
                resources,
                resource_name,
            )
            in_use = updates[resource_name]
            if in_use != usage.in_use:
                print(
                    '\t%s: invalid usage saved=%s actual=%s%s' %
                    (resource_name, usage.in_use, in_use, action_msg)
                )
                discrepancy = True
                if do_fix:
                    usage.in_use = in_use
            reservations = self._get_reservations(
                ctxt,
                project,
                usage.id,
            )
            # Only positive deltas count against the reserved total.
            num_reservations = sum(
                r.delta for r in reservations if r.delta > 0
            )
            if num_reservations != usage.reserved:
                print(
                    '\t%s: invalid reserved saved=%s actual=%s%s' %
                    (
                        resource_name,
                        usage.reserved,
                        num_reservations,
                        action_msg,
                    )
                )
                discrepancy = True
                if do_fix:
                    usage.reserved = num_reservations
        return discrepancy
class VersionCommands(object):
    """Expose the version of the installed cinder codebase."""

    def __init__(self):
        pass

    def list(self):
        # version_string() reports the running Cinder code version.
        print(version.version_string())

    def __call__(self):
        # Allow the command object itself to be invoked directly.
        self.list()
class VolumeCommands(object):
    """Methods for dealing with a cloud in an odd state."""

    @args('volume_id',
          help='Volume ID to be deleted')
    def delete(self, volume_id: str) -> None:
        """Delete a volume, bypassing the check that it must be available."""
        ctxt = context.get_admin_context()
        volume = objects.Volume.get_by_id(ctxt, volume_id)
        host = volume_utils.extract_host(volume.host) if volume.host else None
        if not host:
            # No backend owns the volume yet, so there is no RPC target;
            # remove the DB record directly.
            print(_("Volume not yet assigned to host."))
            print(_("Deleting volume from database and skipping rpc."))
            volume.destroy()
            return
        if volume.status == 'in-use':
            print(_("Volume is in-use."))
            print(_("Detach volume from instance and then try again."))
            return
        # RPC must be initialized before we can reach the volume service.
        rpc.init(CONF)
        rpcapi = volume_rpcapi.VolumeAPI()
        rpcapi.delete_volume(ctxt, volume)

    @args('--currenthost', required=True, help='Existing volume host name in '
                                               'the format host@backend#pool')
    @args('--newhost', required=True, help='New volume host name in the '
                                           'format host@backend#pool')
    def update_host(self, currenthost: str, newhost: str) -> None:
        """Modify the host name associated with a volume.

        Particularly to recover from cases where one has moved
        their Cinder Volume node, or modified their backend_name in a
        multi-backend config.
        """
        ctxt = context.get_admin_context()
        volumes = db.volume_get_all_by_host(ctxt,
                                            currenthost)
        for v in volumes:
            db.volume_update(ctxt, v['id'],
                             {'host': newhost})

    def update_service(self):
        """Modify the service uuid associated with a volume.

        In certain upgrade cases, we create new cinder services and delete the
        records of old ones, however, the volumes created with old service
        still contain the service uuid of the old services.
        """
        ctxt = context.get_admin_context()
        db.volume_update_all_by_service(ctxt)
class ConfigCommands(object):
    """Class for exposing the flags defined by flag_file(s)."""

    def __init__(self):
        pass

    @args('param', nargs='?', default=None,
          help='Configuration parameter to display (default: %(default)s)')
    def list(self, param: Optional[str] = None) -> None:
        """List parameters configured for cinder.

        Lists all parameters configured for cinder unless an optional
        argument is specified.  If the parameter is specified we only print
        the requested parameter.  If the parameter is not found an
        appropriate error is produced by .get*().
        """
        param = param.strip() if param else param
        if not param:
            # Dump every registered option.
            for name, value in CONF.items():
                print('%s = %s' % (name, value))
            return
        print('%s = %s' % (param, CONF.get(param)))
class BackupCommands(object):
    """Methods for managing backups."""

    def list(self) -> None:
        """List all backups.

        List all backups (including ones in progress) and the host
        on which the backup operation is running.
        """
        ctxt = context.get_admin_context()
        backups = objects.BackupList.get_all(ctxt)
        hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
        print(hdr % (_('ID'),
                     _('User ID'),
                     _('Project ID'),
                     _('Host'),
                     _('Name'),
                     _('Container'),
                     _('Status'),
                     _('Size'),
                     _('Object Count')))
        row_fmt = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d"
        for bkp in backups:
            # object_count may be NULL while a backup is in progress.
            object_count = bkp['object_count'] or 0
            print(row_fmt % (bkp['id'],
                             bkp['user_id'],
                             bkp['project_id'],
                             bkp['host'],
                             bkp['display_name'],
                             bkp['container'],
                             bkp['status'],
                             bkp['size'],
                             object_count))

    @args('--currenthost', required=True, help='Existing backup host name')
    @args('--newhost', required=True, help='New backup host name')
    def update_backup_host(self, currenthost: str, newhost: str) -> None:
        """Modify the host name associated with a backup.

        Particularly to recover from cases where one has moved
        their Cinder Backup node, and not set backup_use_same_backend.
        """
        ctxt = context.get_admin_context()
        for backup in objects.BackupList.get_all_by_host(ctxt, currenthost):
            backup.host = newhost
            backup.save()
class BaseCommand(object):
    """Shared formatting helpers for table-printing management commands."""

    @staticmethod
    def _normalize_time(time_field):
        # Falsy timestamps (e.g. None) pass through untouched.
        if not time_field:
            return time_field
        return timeutils.normalize_time(time_field)

    @staticmethod
    def _state_repr(is_up):
        # Smiley for live services, XXX for dead ones.
        if is_up:
            return ':-)'
        return 'XXX'
class ServiceCommands(BaseCommand):
    """Methods for managing services."""

    def list(self):
        """Show a list of all cinder services."""
        ctxt = context.get_admin_context()
        services = objects.ServiceList.get_all(ctxt)
        print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s"
        print(print_format % (_('Binary'),
                              _('Host'),
                              _('Zone'),
                              _('Status'),
                              _('State'),
                              _('Updated At'),
                              _('RPC Version'),
                              _('Object Version'),
                              _('Cluster')))
        for svc in services:
            # ':-)' when the service heartbeat is current, 'XXX' otherwise.
            art = self._state_repr(svc.is_up)
            status = 'disabled' if svc.disabled else 'enabled'
            updated_at = self._normalize_time(svc.updated_at)
            rpc_version = svc.rpc_current_version
            object_version = svc.object_current_version
            cluster = svc.cluster_name or ''
            print(print_format % (svc.binary, svc.host,
                                  svc.availability_zone, status, art,
                                  updated_at, rpc_version, object_version,
                                  cluster))

    @args('binary', type=str,
          help='Service to delete from the host.')
    @args('host_name', type=str,
          help='Host from which to remove the service.')
    def remove(self, binary: str, host_name: str) -> Optional[int]:
        """Completely removes a service.

        :returns: 2 if the service was not found, None on success.
        """
        ctxt = context.get_admin_context()
        try:
            svc = objects.Service.get_by_args(ctxt, host_name, binary)
            svc.destroy()
        except exception.ServiceNotFound as e:
            print(_("Host not found. Failed to remove %(service)s"
                    " on %(host)s.") %
                  {'service': binary, 'host': host_name})
            print(u"%s" % e.args)
            return 2
        print(_("Service %(service)s on host %(host)s removed.") %
              {'service': binary, 'host': host_name})
        return None
class ClusterCommands(BaseCommand):
    """Methods for managing clusters."""

    def list(self) -> None:
        """Show a list of all cinder services."""
        ctxt = context.get_admin_context()
        clusters = objects.ClusterList.get_all(ctxt, services_summary=True)
        print_format = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s"
        print(print_format % (_('Name'),
                              _('Binary'),
                              _('Status'),
                              _('State'),
                              _('Heartbeat'),
                              _('Hosts'),
                              _('Down Hosts'),
                              _('Updated At')))
        for cluster in clusters:
            art = self._state_repr(cluster.is_up)
            status = 'disabled' if cluster.disabled else 'enabled'
            heartbeat = self._normalize_time(cluster.last_heartbeat)
            updated_at = self._normalize_time(cluster.updated_at)
            print(print_format % (cluster.name, cluster.binary, status, art,
                                  heartbeat, cluster.num_hosts,
                                  cluster.num_down_hosts, updated_at))

    @args('--recursive', action='store_true', default=False,
          help='Delete associated hosts.')
    @args('binary', type=str,
          help='Service to delete from the cluster.')
    @args('cluster-name', type=str, help='Cluster to delete.')
    def remove(self,
               recursive: bool,
               binary: str,
               cluster_name: str) -> Optional[int]:
        """Completely removes a cluster.

        :returns: 2 when the cluster does not exist or still has hosts,
            None on success.
        """
        ctxt = context.get_admin_context()
        try:
            cluster = objects.Cluster.get_by_id(ctxt, None, name=cluster_name,
                                                binary=binary,
                                                get_services=recursive)
        except exception.ClusterNotFound:
            print(_("Couldn't remove cluster %s because it doesn't exist.") %
                  cluster_name)
            return 2
        if recursive:
            # Remove the member services first so the cluster delete below
            # doesn't fail with ClusterHasHosts.
            for service in cluster.services:
                service.destroy()
        try:
            cluster.destroy()
        except exception.ClusterHasHosts:
            print(_("Couldn't remove cluster %s because it still has hosts.") %
                  cluster_name)
            return 2
        msg = _('Cluster %s successfully removed.') % cluster_name
        if recursive:
            msg = (_('%(msg)s And %(num)s services from the cluster were also '
                     'removed.') % {'msg': msg, 'num': len(cluster.services)})
        print(msg)
        return None

    @args('--full-rename', dest='partial',
          action='store_false', default=True,
          help='Do full cluster rename instead of just replacing provided '
               'current cluster name and preserving backend and/or pool info.')
    @args('current', help='Current cluster name.')
    @args('new', help='New cluster name.')
    def rename(self,
               partial: bool,
               current: Optional[str],
               new: Optional[str]) -> Optional[int]:
        """Rename cluster name for Volumes and Consistency Groups.

        Useful when you want to rename a cluster, particularly when the
        backend_name has been modified in a multi-backend config or we have
        moved from a single backend to multi-backend.

        :returns: 2 when nothing matched the current cluster name, None on
            success.
        """
        ctxt = context.get_admin_context()
        # Convert empty strings to None
        current = current or None
        new = new or None
        # Update Volumes
        num_vols = objects.VolumeList.include_in_cluster(
            ctxt, new, partial_rename=partial, cluster_name=current)
        # Update Consistency Groups
        num_cgs = objects.ConsistencyGroupList.include_in_cluster(
            ctxt, new, partial_rename=partial, cluster_name=current)
        if num_vols or num_cgs:
            msg = _('Successfully renamed %(num_vols)s volumes and '
                    '%(num_cgs)s consistency groups from cluster %(current)s '
                    'to %(new)s')
            print(msg % {'num_vols': num_vols, 'num_cgs': num_cgs, 'new': new,
                         'current': current})
        else:
            msg = _('No volumes or consistency groups exist in cluster '
                    '%(current)s.')
            print(msg % {'current': current})
            return 2
        return None
class ConsistencyGroupCommands(object):
    """Methods for managing consistency groups."""

    @args('--currenthost', required=True, help='Existing CG host name')
    @args('--newhost', required=True, help='New CG host name')
    def update_cg_host(self, currenthost: str, newhost: str) -> None:
        """Modify the host name associated with a Consistency Group.

        Particularly to recover from cases where one has moved
        a host from single backend to multi-backend, or changed the host
        configuration option, or modified the backend_name in a multi-backend
        config.
        """
        ctxt = context.get_admin_context()
        filters = {'host': currenthost}
        groups = objects.ConsistencyGroupList.get_all(ctxt, filters)
        for group in groups:
            group.host = newhost
            group.save()
class UtilCommands(object):
    """Generic utils."""

    @staticmethod
    def _get_resources_locks() -> Tuple[collections.defaultdict,
                                        collections.defaultdict,
                                        collections.defaultdict]:
        """Get all vol/snap/backup file lock paths.

        Returns three mappings: volume id -> lock files, snapshot id ->
        lock files, and backup parent-process-group id -> lock files.
        """
        backup_locks_prefix = 'cinder-cleanup_incomplete_backups_'
        oslo_dir = os.path.abspath(cfg.CONF.oslo_concurrency.lock_path)
        filenames = glob.glob(os.path.join(oslo_dir, 'cinder-*'))
        backend_url = cfg.CONF.coordination.backend_url
        # When tooz also uses file locks they may live in a different
        # directory, which must be scanned as well.
        if backend_url.startswith('file://'):
            tooz_dir = os.path.abspath(backend_url[7:])
            if tooz_dir != oslo_dir:
                filenames += glob.glob(os.path.join(tooz_dir, 'cinder-*'))
        volumes: collections.defaultdict = collections.defaultdict(list)
        snapshots: collections.defaultdict = collections.defaultdict(list)
        backups: collections.defaultdict = collections.defaultdict(list)
        # Lock names embed a resource UUID; snapshot locks also contain the
        # word 'snapshot'.
        matcher = re.compile('.*?([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
                             '[0-9a-f]{4}-[0-9a-f]{12}).*?', re.IGNORECASE)
        for filename in filenames:
            basename = os.path.basename(filename)
            match = matcher.match(basename)
            if match:
                dest = snapshots if 'snapshot' in basename else volumes
                res_id = match.group(1)
                dest[res_id].append(filename)
            elif basename.startswith(backup_locks_prefix):
                # The PGRP follows the prefix; derive the offset from the
                # prefix instead of hard-coding its length (was 34).
                pgrp = basename[len(backup_locks_prefix):]
                backups[pgrp].append(filename)
        return volumes, snapshots, backups

    def _exclude_running_backups(self, backups: dict) -> None:
        """Remove backup entries from the dict for running backup services."""
        for backup_pgrp in list(backups.keys()):
            # The PGRP is the same as the PID of the parent process, so we
            # know the lock could be in use if the process is running and
            # it's the cinder-backup command (the PID could have been reused).
            cmdline_file = os.path.join('/proc', backup_pgrp, 'cmdline')
            try:
                with open(cmdline_file, 'r') as f:
                    if 'cinder-backup' in f.read():
                        del backups[backup_pgrp]
            except FileNotFoundError:
                # Process is gone; the lock is stale and safe to remove.
                continue
            except Exception:
                # Unexpected error, leaving the lock file just in case
                del backups[backup_pgrp]

    @args('--services-offline', dest='online',
          action='store_false', default=True,
          help='All locks can be deleted as Cinder services are not running.')
    def clean_locks(self, online: bool) -> None:
        """Clean file locks for vols, snaps, and backups on the current host.

        Should be run on any host where we are running a Cinder service (API,
        Scheduler, Volume, Backup) and can be run with the Cinder services
        running or stopped.

        If the services are running it will check existing resources in the
        Cinder database in order to know which resources are still available
        (it's not safe to remove their file locks) and will only remove the
        file locks for the resources that are no longer present. Deleting
        locks while the services are offline is faster as there's no need to
        check the database.

        For backups, the way to know if we can remove the startup lock is by
        checking if the PGRP in the file name is currently running
        cinder-backup.

        Default assumes that services are online, must pass
        ``--services-offline`` to specify that they are offline.

        Doesn't clean DLM locks (except when using file locks), as those don't
        leave lock leftovers.
        """
        self.ctxt = context.get_admin_context()
        # Find volume and snapshots ids, and backups PGRP based on the
        # existing file locks
        volumes: Union[collections.defaultdict, dict]
        snapshots: Union[collections.defaultdict, dict]
        volumes, snapshots, backups = self._get_resources_locks()

        # If services are online we cannot delete locks for existing resources
        if online:
            # We don't want to delete file locks for existing resources
            volumes = {vol_id: files for vol_id, files in volumes.items()
                       if not objects.Volume.exists(self.ctxt, vol_id)}
            snapshots = {snap_id: files
                         for snap_id, files in snapshots.items()
                         if not objects.Snapshot.exists(self.ctxt, snap_id)}
            self._exclude_running_backups(backups)

        def _err(filename: str, exc: Exception) -> None:
            # NOTE: must interpolate with %; print() takes extra positional
            # arguments as separate values and never formats the message
            # (the original passed the mapping as a second argument).
            print('Failed to cleanup lock %(name)s: %(exc)s' %
                  {'name': filename, 'exc': exc})

        # Now clean
        for filenames in itertools.chain(volumes.values(),
                                         snapshots.values(),
                                         backups.values()):
            for filename in filenames:
                try:
                    os.remove(filename)
                except OSError as exc:
                    # A lock removed concurrently is not an error.
                    if (exc.errno != errno.ENOENT):
                        _err(filename, exc)
                except Exception as exc:
                    _err(filename, exc)
# Maps each CLI category name to the command class implementing its
# actions; add_command_parsers() builds one subparser per entry.
CATEGORIES = {
    'backup': BackupCommands,
    'config': ConfigCommands,
    'cluster': ClusterCommands,
    'cg': ConsistencyGroupCommands,
    'db': DbCommands,
    'host': HostCommands,
    'quota': QuotaCommands,
    'service': ServiceCommands,
    'version': VersionCommands,
    'volume': VolumeCommands,
    'util': UtilCommands,
}
def methods_of(obj) -> list:
    """Return non-private methods from an object.

    Get all callable methods of an object that don't start with underscore
    :return: a list of tuples of the form (method_name, method)
    """
    result = []
    for name in dir(obj):
        attr = getattr(obj, name)
        # The builtin callable() covers everything the previous
        # isinstance(..., collections.abc.Callable) check did.
        if callable(attr) and not name.startswith('_'):
            result.append((name, attr))
    return result
def missing_action(help_func: Callable) -> Callable:
    """Build a fallback action that shows help and exits with status 2."""
    def _show_help_and_exit():
        help_func()
        exit(2)
    return _show_help_and_exit
def add_command_parsers(subparsers):
    """Register a subparser per category and one per command action."""
    for category in sorted(CATEGORIES):
        handler = CATEGORIES[category]()
        cat_parser = subparsers.add_parser(category)
        cat_parser.set_defaults(command_object=handler)
        # Invoking a category with no action prints its help and exits.
        fallback = missing_action(cat_parser.print_help)
        cat_parser.set_defaults(action_fn=fallback)
        action_subparsers = cat_parser.add_subparsers(dest='action')
        for action_name, action_fn in methods_of(handler):
            action_parser = action_subparsers.add_parser(action_name)
            action_kwargs: list = []
            # Wire up any @args declarations attached to the method.
            for arg_names, arg_opts in getattr(action_fn, 'args', []):
                action_parser.add_argument(*arg_names, **arg_opts)
            action_parser.set_defaults(action_fn=action_fn)
            action_parser.set_defaults(action_kwargs=action_kwargs)
# Top-level positional 'category' option; the handler installs all the
# per-category subparsers defined above.
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 handler=add_command_parsers)
def get_arg_string(args):
    """Normalize a CLI option string into a keyword-argument name."""
    if args[0] == '-':
        # cfg hands the ArgParser a fixed prefix char of '-', so strip
        # two dashes for long options and one for short options.
        offset = 2 if args[1] == '-' else 1
        args = args[offset:]
    # Dashes become underscores so the result is a valid identifier.
    if args:
        args = args.replace('-', '_')
    return args
def fetch_func_args(func):
    """Collect parsed CLI values for each option the command declared."""
    fn_kwargs = {}
    for arg_names, arg_opts in getattr(func, 'args', []):
        # An explicit argparse 'dest' takes precedence over the name
        # derived from the option string.
        key = arg_opts.get('dest') or get_arg_string(arg_names[0])
        fn_kwargs[key] = getattr(CONF.category, key)
    return fn_kwargs
def main():
    """Parse options and call the appropriate class/method."""
    # NOTE: the docstring above was previously placed after the first
    # statement, where it was a dead string literal rather than a docstring.
    objects.register_all()
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        # No category given: print version, usage and the category list.
        print(_("\nOpenStack Cinder version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action []")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print(_("\t%s") % category)
        sys.exit(2)
    try:
        CONF(sys.argv[1:], project='cinder',
             version=version.version_string())
        logging.setup(CONF, "cinder")
        python_logging.captureWarnings(True)
    except cfg.ConfigDirNotFoundError as details:
        print(_("Invalid directory: %s") % details)
        sys.exit(2)
    except cfg.ConfigFilesNotFoundError as e:
        cfg_files = e.config_files
        print(_("Failed to read configuration file(s): %s") % cfg_files)
        sys.exit(2)
    # Dispatch to the selected category action with its parsed arguments.
    fn = CONF.category.action_fn
    fn_kwargs = fetch_func_args(fn)
    fn(**fn_kwargs)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/rtstool.py 0000664 0000000 0000000 00000025014 15131732575 0022461 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# Copyright 2012 - 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import rtslib_fb
from cinder import i18n
from cinder.i18n import _
i18n.enable_lazy()
class RtstoolError(Exception):
    """Base exception for rtstool failures (lookup/save/restore errors)."""
    pass
class RtstoolImportError(RtstoolError):
    """Raised when the installed rtslib_fb lacks a required member."""
    pass
def create(backing_device, name, userid, password, iser_enabled,
           initiator_iqns=None, portals_ips=None, portals_port=3260):
    """Create and export an iSCSI target backed by backing_device.

    :param backing_device: block device path to export
    :param name: name used for both the storage object and the target IQN
    :param userid: CHAP user id applied to each initiator ACL
    :param password: CHAP password applied to each initiator ACL
    :param iser_enabled: the string 'True' enables iSER on the portals
    :param initiator_iqns: optional comma-separated list of allowed IQNs
    :param portals_ips: optional list of IPs to listen on; defaults to all
        IPv4 and IPv6 addresses
    :param portals_port: TCP port for every portal (default 3260)
    """
    # List of IPS that will not raise an error when they fail binding.
    # Originally we will fail on all binding errors.
    ips_allow_fail = ()

    try:
        rtsroot = rtslib_fb.root.RTSRoot()
    except rtslib_fb.utils.RTSLibError:
        print(_('Ensure that configfs is mounted at /sys/kernel/config.'))
        raise

    # Look to see if BlockStorageObject already exists
    for x in rtsroot.storage_objects:
        if x.name == name:
            # Already exists, use this one
            return

    so_new = rtslib_fb.BlockStorageObject(name=name,
                                          dev=backing_device)
    target_new = rtslib_fb.Target(rtslib_fb.FabricModule('iscsi'), name,
                                  'create')
    tpg_new = rtslib_fb.TPG(target_new, mode='create')
    tpg_new.set_attribute('authentication', '1')
    lun_new = rtslib_fb.LUN(tpg_new, storage_object=so_new)

    if initiator_iqns:
        initiator_iqns = initiator_iqns.strip(' ')
        # One ACL (with CHAP credentials) and LUN mapping per initiator.
        for i in initiator_iqns.split(','):
            acl_new = rtslib_fb.NodeACL(tpg_new, i, mode='create')
            acl_new.chap_userid = userid
            acl_new.chap_password = password
            rtslib_fb.MappedLUN(acl_new, lun_new.lun, lun_new.lun)

    tpg_new.enable = 1

    # If no ips are given we'll bind to all IPv4 and v6
    if not portals_ips:
        portals_ips = ('0.0.0.0', '[::0]')
        # TODO(emh): Binding to IPv6 fails sometimes -- let pass for now.
        ips_allow_fail = ('[::0]',)

    for ip in portals_ips:
        try:
            # rtslib expects IPv6 addresses to be surrounded by brackets
            portal = rtslib_fb.NetworkPortal(tpg_new, _canonicalize_ip(ip),
                                             portals_port, mode='any')
        except rtslib_fb.utils.RTSLibError:
            raise_exc = ip not in ips_allow_fail
            msg_type = 'Error' if raise_exc else 'Warning'
            print(_('%(msg_type)s: creating NetworkPortal: ensure port '
                    '%(port)d on ip %(ip)s is not in use by another service.')
                  % {'msg_type': msg_type, 'port': portals_port, 'ip': ip})
            if raise_exc:
                raise
        else:
            try:
                if iser_enabled == 'True':
                    portal.iser = True
            except rtslib_fb.utils.RTSLibError:
                print(_('Error enabling iSER for NetworkPortal: please ensure '
                        'that RDMA is supported on your iSCSI port %(port)d '
                        'on ip %(ip)s.') % {'port': portals_port, 'ip': ip})
                raise
def _lookup_target(target_iqn, initiator_iqn):
    """Return the rtslib target whose wwn matches target_iqn.

    :raises RtstoolError: if no such target exists.
    """
    try:
        root = rtslib_fb.root.RTSRoot()
    except rtslib_fb.utils.RTSLibError:
        print(_('Ensure that configfs is mounted at /sys/kernel/config.'))
        raise

    # Look for the target
    target = next((t for t in root.targets if t.wwn == target_iqn), None)
    if target is None:
        raise RtstoolError(_('Could not find target %s') % target_iqn)
    return target
def add_initiator(target_iqn, initiator_iqn, userid, password):
    """Grant initiator_iqn CHAP-authenticated access to target_iqn."""
    target = _lookup_target(target_iqn, initiator_iqn)
    tpg = next(target.tpgs)  # get the first one
    # If an ACL already exists for this initiator, nothing to do.
    wanted = initiator_iqn.lower()
    if any(acl.node_wwn.lower() == wanted for acl in tpg.node_acls):
        return
    new_acl = rtslib_fb.NodeACL(tpg, initiator_iqn, mode='create')
    new_acl.chap_userid = userid
    new_acl.chap_password = password
    rtslib_fb.MappedLUN(new_acl, 0, tpg_lun=0)
def delete_initiator(target_iqn, initiator_iqn):
    """Remove initiator_iqn's ACL from target_iqn, if one exists."""
    target = _lookup_target(target_iqn, initiator_iqn)
    tpg = next(target.tpgs)  # get the first one
    wanted = initiator_iqn.lower()
    for acl in tpg.node_acls:
        if acl.node_wwn.lower() == wanted:
            acl.delete()
            return
    # A missing ACL is not fatal; report it and return successfully.
    print(_('delete_initiator: %s ACL not found. Continuing.') % initiator_iqn)
def get_targets():
    """Print the wwn of every configured target, one per line."""
    for target in rtslib_fb.root.RTSRoot().targets:
        print(target.wwn)
def delete(iqn):
    """Delete the target and the backing storage object named iqn."""
    root = rtslib_fb.root.RTSRoot()
    # At most one target and one storage object can match the name.
    target = next((t for t in root.targets if t.wwn == iqn), None)
    if target is not None:
        target.delete()
    storage = next((s for s in root.storage_objects if s.name == iqn), None)
    if storage is not None:
        storage.delete()
def verify_rtslib():
    """Raise RtstoolImportError if rtslib_fb lacks a required member."""
    required = ('BlockStorageObject', 'FabricModule', 'LUN',
                'MappedLUN', 'NetworkPortal', 'NodeACL', 'root',
                'Target', 'TPG')
    for member in required:
        if not hasattr(rtslib_fb, member):
            raise RtstoolImportError(_("rtslib_fb is missing member %s: You "
                                       "may need a newer python-rtslib-fb.") %
                                     member)
def usage():
    """Print command usage and exit with status 1."""
    prog = sys.argv[0]
    print("Usage:")
    print(prog +
          " create [device] [name] [userid] [password] [iser_enabled]" +
          " [-a] [-pPORT]")
    print(prog +
          " add-initiator [target_iqn] [userid] [password] [initiator_iqn]")
    print(prog +
          " delete-initiator [target_iqn] [initiator_iqn]")
    print(prog + " get-targets")
    print(prog + " delete [iqn]")
    print(prog + " verify")
    print(prog + " save [path_to_file]")
    sys.exit(1)
def save_to_file(destination_file):
    """Persist the running LIO configuration to destination_file.

    If destination_file is falsy, rtslib's default save file is used and
    its directory is created when missing.

    :raises RtstoolError: if the directory cannot be created or the
        configuration cannot be written.
    """
    rtsroot = rtslib_fb.root.RTSRoot()
    try:
        # If default destination use rtslib default save file
        if not destination_file:
            destination_file = rtslib_fb.root.default_save_file
            path_to_file = os.path.dirname(destination_file)
            # NOTE(geguileo): With default file we ensure path exists and
            # create it if doesn't.
            # Cinder's LIO target helper runs this as root, so it will have no
            # problem creating directory /etc/target.
            # If run manually from the command line without being root you
            # will get an error, same as when creating and removing targets.
            if not os.path.exists(path_to_file):
                os.makedirs(path_to_file, 0o755)
    except OSError as exc:
        raise RtstoolError(_('targetcli not installed and could not create '
                             'default directory (%(default_path)s): %(exc)s') %
                           {'default_path': path_to_file, 'exc': exc})
    try:
        rtsroot.save_to_file(destination_file)
    except (OSError, IOError) as exc:
        raise RtstoolError(_('Could not save configuration to %(file_path)s: '
                             '%(exc)s') %
                           {'file_path': destination_file, 'exc': exc})
def restore_from_file(configuration_file):
    """Restore the LIO configuration from configuration_file.

    If configuration_file is falsy, rtslib's default save file is used.

    :raises RtstoolError: if rtslib fails to read or apply the file.
    """
    rtsroot = rtslib_fb.root.RTSRoot()
    # If configuration file is None, use rtslib default save file.
    if not configuration_file:
        configuration_file = rtslib_fb.root.default_save_file
    try:
        rtsroot.restore_from_file(configuration_file)
    except (OSError, IOError) as exc:
        # NOTE: interpolate with % — previously the mapping was passed as a
        # second positional argument to RtstoolError, so the message was
        # never formatted (matching save_to_file above).
        raise RtstoolError(_('Could not restore configuration file '
                             '%(file_path)s: %(exc)s') %
                           {'file_path': configuration_file, 'exc': exc})
def parse_optional_create(argv):
    """Parse the optional trailing arguments of the 'create' command.

    Recognizes '-a<ip,...>' portal IPs, '-p<port>' portal port, and a
    bare word as the initiator IQN list; invalid values exit via usage().
    """
    parsed = {}
    for token in argv:
        if token.startswith('-a'):
            ips = [ip for ip in token[2:].split(',') if ip]
            if not ips:
                usage()
            parsed['portals_ips'] = ips
        elif token.startswith('-p'):
            try:
                parsed['portals_port'] = int(token[2:])
            except ValueError:
                usage()
        else:
            parsed['initiator_iqns'] = token
    return parsed
def _canonicalize_ip(ip):
if ip.startswith('[') or "." in ip:
return ip
return "[" + ip + "]"
def main(argv=None):
    """Dispatch a cinder-rtstool command from argv.

    Returns 0 on success; invalid usage exits with status 1 via usage().
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        usage()

    if argv[1] == 'create':
        # create takes 5 required positionals plus up to 3 optional args.
        if len(argv) < 7:
            usage()
        if len(argv) > 10:
            usage()
        backing_device = argv[2]
        name = argv[3]
        userid = argv[4]
        password = argv[5]
        iser_enabled = argv[6]
        if len(argv) > 7:
            optional_args = parse_optional_create(argv[7:])
        else:
            optional_args = {}
        create(backing_device, name, userid, password, iser_enabled,
               **optional_args)

    elif argv[1] == 'add-initiator':
        if len(argv) < 6:
            usage()
        target_iqn = argv[2]
        userid = argv[3]
        password = argv[4]
        initiator_iqn = argv[5]
        add_initiator(target_iqn, initiator_iqn, userid, password)

    elif argv[1] == 'delete-initiator':
        if len(argv) < 4:
            usage()
        target_iqn = argv[2]
        initiator_iqn = argv[3]
        delete_initiator(target_iqn, initiator_iqn)

    elif argv[1] == 'get-targets':
        get_targets()

    elif argv[1] == 'delete':
        if len(argv) < 3:
            usage()
        iqn = argv[2]
        delete(iqn)

    elif argv[1] == 'verify':
        # This is used to verify that this script can be called by cinder,
        # and that rtslib_fb is new enough to work.
        verify_rtslib()
        return 0

    elif argv[1] == 'save':
        if len(argv) > 3:
            usage()
        destination_file = argv[2] if len(argv) > 2 else None
        save_to_file(destination_file)
        return 0

    elif argv[1] == 'restore':
        if len(argv) > 3:
            usage()
        configuration_file = argv[2] if len(argv) > 2 else None
        restore_from_file(configuration_file)
        return 0

    else:
        usage()

    return 0
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/scheduler.py 0000664 0000000 0000000 00000004072 15131732575 0022732 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Cinder Scheduler."""
import logging as python_logging
import sys
import eventlet
eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading # pylint: disable=E0401
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = \
threading._active # type: ignore
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
# Need to register global_opts
from cinder.common import config # noqa
from cinder import i18n
i18n.enable_lazy()
from cinder import objects
from cinder import service
from cinder import utils
from cinder import version
CONF = cfg.CONF
def main() -> None:
    """Entry point: configure Cinder and run the scheduler service."""
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    utils.monkey_patch()
    # Enable guru meditation reports (on-demand service state dumps).
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    server = service.Service.create(binary='cinder-scheduler')
    # Serve the scheduler service and block until it terminates.
    service.serve(server)
    service.wait()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/status.py 0000664 0000000 0000000 00000027062 15131732575 0022303 0 ustar 00root root 0000000 0000000 # Copyright 2018 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""CLI interface for cinder status commands."""
import os
import sys
from oslo_config import cfg
from oslo_upgradecheck import upgradecheck as uc
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.policy import DEFAULT_POLICY_FILENAME
# Need to import service to load config
from cinder import service # noqa
# We must first register Cinder's objects. Otherwise
# we cannot import the volume manager.
objects.register_all()
import cinder.volume.manager as volume_manager
CONF = cfg.CONF
SUCCESS = uc.Code.SUCCESS
FAILURE = uc.Code.FAILURE
WARNING = uc.Code.WARNING
# Name fragments of volume drivers removed since the Stein release;
# _check_removed_drivers() matches these as substrings of each configured
# volume_driver path.
REMOVED_DRVRS = [
    "coprhd",
    "drbdmanage",
    "disco",
    "hgst",
    "hpe_lefthand",
    "sheepdog",
    "zfssa",
]
def _get_enabled_drivers() -> list[str]:
    """Returns a list of volume_driver entries"""
    drivers: list[str] = []
    if not CONF.enabled_backends:
        return drivers
    backend: str
    for backend in filter(None, CONF.enabled_backends):
        # Each backend group needs to be registered first
        CONF.register_opts(volume_manager.volume_backend_opts,
                           group=backend)
        drivers.append(CONF[backend]['volume_driver'])
    return drivers
class Checks(uc.UpgradeCommands):
    """Upgrade checks to run."""

    def __init__(self, *args, **kwargs):
        super(Checks, self).__init__(*args, **kwargs)
        # Admin context shared by the DB-touching checks below.
        self.context = context.get_admin_context()

    def _file_exists(self, path: str) -> bool:
        """Helper for mocking check of os.path.exists."""
        return os.path.exists(path)

    def _check_backup_module(self) -> uc.Result:
        """Checks for the use of backup driver module paths.

        The use of backup modules for setting backup_driver was deprecated and
        we now only allow the full driver path. This checks that there are not
        any remaining settings using the old method.
        """
        # We import here to avoid conf loading order issues with
        # cinder.service above.
        import cinder.backup.manager  # noqa
        backup_driver = CONF.backup_driver
        # Easy check in that a class name will have mixed casing
        if backup_driver == backup_driver.lower():
            return uc.Result(
                FAILURE,
                'Backup driver configuration requires the full path to the '
                'driver, but current setting is using only the module path.')
        return uc.Result(SUCCESS)

    def _check_policy_file(self) -> uc.Result:
        """Checks if a policy.json file is present.

        With the switch to policy-in-code, policy files should be policy.yaml
        and should only be present if overriding default policy. Just checks
        and warns if the old file is present to make sure they are aware it is
        not being used.
        """
        # make sure we know where to look for the policy file
        config_dir = CONF.find_file('cinder.conf')
        if not config_dir:
            return uc.Result(
                WARNING,
                'Cannot locate your cinder configuration directory. '
                'Please re-run using the --config-dir option.')
        policy_file = CONF.oslo_policy.policy_file
        json_file = os.path.join(os.path.dirname(config_dir), 'policy.json')
        if policy_file == DEFAULT_POLICY_FILENAME:
            # Default is being used, check for old json file
            if self._file_exists(json_file):
                return uc.Result(
                    WARNING,
                    'policy.json file is present. Make sure any changes from '
                    'the default policies are present in a policy.yaml file '
                    'instead. If you really intend to use a policy.json file, '
                    'make sure that its absolute path is set as the value of '
                    "the 'policy_file' configuration option in the "
                    '[oslo_policy] section of your cinder.conf file.')
        else:
            # They have configured a custom policy file. It is OK if it does
            # not exist, but we should check and warn about it while we're
            # checking.
            if not policy_file.startswith('/'):
                # policy_file is relative to config_dir
                policy_file = os.path.join(os.path.dirname(config_dir),
                                           policy_file)
            if not self._file_exists(policy_file):
                return uc.Result(
                    WARNING,
                    "Configured policy file '%s' does not exist. This may be "
                    "expected, but default policies will be used until any "
                    "desired overrides are added to the configured file." %
                    policy_file)
        return uc.Result(SUCCESS)

    def _check_periodic_interval(self) -> uc.Result:
        """Checks for non-default use of periodic_interval.

        Some new configuration options have been introduced to supplement
        periodic_interval, which was being used for multiple, possibly
        conflicting purposes. If a non-default value for periodic_interval
        is configured, warn the operator to review whether one of the new
        options is better suited for the periodic task(s) being tuned.
        """
        periodic_interval = CONF.periodic_interval
        # 60 seconds is the option's default value.
        if periodic_interval != 60:
            return uc.Result(
                WARNING,
                "Detected non-default value for the 'periodic_interval' "
                "option. New configuration options have been introduced to "
                "replace the use of 'periodic_interval' for some purposes. "
                "Please consult the 'Upgrade' section of the Train release "
                "notes for more information.")
        return uc.Result(SUCCESS)

    def _check_nested_quota(self) -> uc.Result:
        """Checks for the use of the nested quota driver.

        The NestedDbQuotaDriver is deprecated in the Train release and is
        removed in Wallaby release to prepare for upcoming unified limits
        changes.
        """
        # We import here to avoid conf loading order issues with
        # cinder.service above.
        import cinder.quota  # noqa
        quota_driver = CONF.quota_driver
        if quota_driver == 'cinder.quota.NestedDbQuotaDriver':
            return uc.Result(
                FAILURE,
                'The NestedDbQuotaDriver was deprecated in Train release '
                'and is removed in Wallaby release.')
        return uc.Result(SUCCESS)

    def _check_legacy_windows_config(self) -> uc.Result:
        """Checks to ensure that the Windows driver path is properly updated.

        The WindowsDriver was renamed in the Queens release to
        WindowsISCSIDriver to avoid confusion with the SMB driver.
        The backwards compatibility for this has now been removed, so
        any cinder.conf settings still using
        cinder.volume.drivers.windows.windows.WindowsDriver
        must now be updated to use
        cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver.
        """
        for volume_driver in _get_enabled_drivers():
            if (volume_driver ==
                    "cinder.volume.drivers.windows.windows.WindowsDriver"):
                return uc.Result(
                    FAILURE,
                    'Setting volume_driver to '
                    'cinder.volume.drivers.windows.windows.WindowsDriver '
                    'is no longer supported.  Please update to use '
                    'cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver '
                    'in cinder.conf.')
        return uc.Result(SUCCESS)

    def _check_removed_drivers(self) -> uc.Result:
        """Checks to ensure that no removed drivers are configured.

        Checks start with drivers removed in the Stein release.
        """
        removed_drivers = []
        for volume_driver in _get_enabled_drivers():
            # Substring match against the REMOVED_DRVRS name fragments.
            for removed_driver in REMOVED_DRVRS:
                if removed_driver in volume_driver:
                    removed_drivers.append(volume_driver)
        if removed_drivers:
            if len(removed_drivers) > 1:
                return uc.Result(
                    FAILURE,
                    'The following drivers, which no longer exist, were found '
                    'configured in your cinder.conf file:\n%s.\n'
                    'These drivers have been removed and all data should '
                    'be migrated off of the associated backends before '
                    'upgrading Cinder.' % ",\n".join(removed_drivers))
            else:
                return uc.Result(
                    FAILURE,
                    'Found driver %s configured in your cinder.conf file. '
                    'This driver has been removed and all data should '
                    'be migrated off of this backend before upgrading '
                    'Cinder.' % removed_drivers[0])
        return uc.Result(SUCCESS)

    def _check_service_uuid(self) -> uc.Result:
        # Success requires that no service row has a NULL uuid AND that no
        # volume is missing its service_uuid.
        try:
            db.service_get_by_uuid(self.context, None)
        except exception.ServiceNotFound:
            volumes = db.volume_get_all(self.context,
                                        limit=1,
                                        filters={'service_uuid': None})
            if not volumes:
                return uc.Result(SUCCESS)
        return uc.Result(
            FAILURE,
            'Services and volumes must have a service UUID. Please fix this '
            'issue by running Queens online data migrations.')

    def _check_attachment_specs(self) -> uc.Result:
        if db.attachment_specs_exist(self.context):
            return uc.Result(
                FAILURE,
                'There should be no more AttachmentSpecs in the system. '
                'Please fix this issue by running Queens online data '
                'migrations.')
        return uc.Result(SUCCESS)

    # (name, method) pairs run in order by the oslo.upgradecheck framework.
    _upgrade_checks = (
        # added in Stein
        ('Backup Driver Path', _check_backup_module),
        ('Use of Policy File', _check_policy_file),
        ('Windows Driver Path', _check_legacy_windows_config),
        ('Removed Drivers', _check_removed_drivers),
        # added in Train
        ('Periodic Interval Use', _check_periodic_interval),
        ('Service UUIDs', _check_service_uuid),
        ('Attachment specs', _check_attachment_specs),
        # added in Wallaby
        ('Use of Nested Quota Driver', _check_nested_quota),
    )
def main():
    """Run the cinder-status upgrade checks and return the exit status."""
    # TODO(rosmaita): need to do this because we suggest using the
    # --config-dir option, and if the user gives a bogus value, we
    # get a stacktrace. Needs to be fixed in oslo_upgradecheck
    try:
        return uc.main(CONF, 'cinder', Checks())
    except cfg.ConfigDirNotFoundError:
        # Returning a string makes sys.exit() print it and exit non-zero.
        return ('ERROR: cannot read the cinder configuration directory.\n'
                'Please re-run using the --config-dir option '
                'with a valid cinder configuration directory.')
# Allow direct execution; the process exit status mirrors the check result.
if __name__ == '__main__':
    sys.exit(main())
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/volume.py 0000664 0000000 0000000 00000017107 15131732575 0022266 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Cinder Volume."""
import logging as python_logging
import os
import re
import shlex
import sys
import eventlet
import eventlet.tpool
# Monkey patching must go before the oslo.log import, otherwise
# oslo.context will not use greenthread thread local and all greenthreads
# will share the same context.
if os.name == 'nt':
# eventlet monkey patching the os module causes subprocess.Popen to fail
# on Windows when using pipes due to missing non-blocking IO support.
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading # pylint: disable=E0401
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = \
threading._active # type: ignore
import typing
import os_brick
from oslo_config import cfg
from oslo_log import log as logging
from oslo_privsep import priv_context
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
# Need to register global_opts
from cinder.common import config # noqa
from cinder.common import constants
from cinder.db import api as session
from cinder import exception
from cinder import i18n
i18n.enable_lazy()
from cinder.i18n import _
from cinder import objects
from cinder import service
from cinder import utils
from cinder import version
if typing.TYPE_CHECKING:
import oslo_service
CONF = cfg.CONF

# Allows overriding the host value for an individual backend group.
host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.')
CONF.register_cli_opt(host_opt)

backend_name_opt = cfg.StrOpt(
    'backend_name',
    help='NOTE: For Windows internal use only. The name of the backend to be '
         'managed by this process. It must be one of the backends defined '
         'using the "enabled_backends" option. Note that normally, this '
         'should not be used directly. Cinder uses it internally in order to '
         'spawn subprocesses on Windows.')
CONF.register_cli_opt(backend_name_opt)

cluster_opt = cfg.StrOpt('cluster',
                         default=None,
                         help='Name of this cluster. Used to group volume '
                              'hosts that share the same backend '
                              'configurations to work in HA Active-Active '
                              'mode.')
CONF.register_opt(cluster_opt)

# LOG is populated in main(); module-level so the launch helpers can log.
LOG = None
# Flipped to True by _notify_service_started() once any backend starts.
service_started = False
def _launch_service(launcher: 'oslo_service.ProcessLauncher',
                    backend: str) -> None:
    """Create and launch the volume service for a single backend.

    A failure to start is logged rather than raised so the remaining
    backends still get a chance to start; _ensure_service_started()
    aborts later if none succeeded.
    """
    CONF.register_opt(host_opt, group=backend)
    backend_host = getattr(CONF, backend).backend_host
    host = "%s@%s" % (backend_host or CONF.host, backend)
    # We also want to set cluster to None on empty strings, and we
    # ignore leading and trailing spaces.
    cluster = CONF.cluster and CONF.cluster.strip()
    cluster = (cluster or None) and '%s@%s' % (cluster, backend)
    try:
        server = service.Service.create(host=host,
                                        service_name=backend,
                                        binary=constants.VOLUME_BINARY,
                                        coordination=True,
                                        cluster=cluster)
    except Exception:
        assert LOG is not None
        LOG.exception('Volume service %s failed to start.', host)
    else:
        # Dispose of the whole DB connection pool here before
        # starting another process.  Otherwise we run into cases where
        # child processes share DB connections which results in errors.
        session.dispose_engine()
        launcher.launch_service(server)
        _notify_service_started()
def _ensure_service_started() -> None:
    """Terminate the process if no backend service managed to start."""
    if service_started:
        return
    assert LOG is not None
    LOG.error('No volume service(s) started successfully, terminating.')
    sys.exit(1)
def _notify_service_started() -> None:
    """Record that at least one backend service started successfully."""
    global service_started
    service_started = True
def _launch_services_win32() -> None:
    """Launch the volume service(s) on Windows.

    oslo.service forking is unavailable here, so multiple backends are
    handled by re-invoking this script once per backend with the internal
    --backend_name argument.
    """
    if CONF.backend_name and CONF.backend_name not in CONF.enabled_backends:
        msg = _('The explicitly passed backend name "%(backend_name)s" is not '
                'among the enabled backends: %(enabled_backends)s.')
        raise exception.InvalidInput(
            reason=msg % dict(backend_name=CONF.backend_name,
                              enabled_backends=CONF.enabled_backends))

    # We'll avoid spawning a subprocess if a single backend is requested.
    single_backend_name = (CONF.enabled_backends[0]
                           if len(CONF.enabled_backends) == 1
                           else CONF.backend_name)
    if single_backend_name:
        launcher = service.get_launcher()
        _launch_service(launcher, single_backend_name)
    elif CONF.enabled_backends:
        # We're using the 'backend_name' argument, requesting a certain
        # backend and constructing the service object within the child
        # process.
        launcher = service.WindowsProcessLauncher()
        py_script_re = re.compile(r'.*\.py\w?$')
        backend: str
        for backend in filter(None, CONF.enabled_backends):
            cmd = sys.argv + ['--backend_name=%s' % backend]
            # Recent setuptools versions will trim '-script.py' and '.exe'
            # extensions from sys.argv[0].
            if py_script_re.match(sys.argv[0]):
                cmd = [sys.executable] + cmd
            launcher.add_process(cmd)
            _notify_service_started()

    _ensure_service_started()

    launcher.wait()
def _launch_services_posix() -> None:
    """Start one cinder-volume service per enabled backend, then wait."""
    launcher = service.get_launcher()
    # Skip empty entries that may appear in the configured list.
    enabled = [name for name in CONF.enabled_backends if name]
    for backend_name in enabled:
        _launch_service(launcher, backend_name)
    _ensure_service_started()
    launcher.wait()
def main() -> None:
    """Entry point for the cinder-volume service."""
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    os_brick.setup(CONF)
    # The logger can only be created after logging.setup() above has run.
    global LOG
    LOG = logging.getLogger(__name__)
    if not CONF.enabled_backends:
        LOG.error('Configuration for cinder-volume does not specify '
                  '"enabled_backends". Using DEFAULT section to configure '
                  'drivers is not supported since Ocata.')
        sys.exit(1)
    if os.name == 'nt':
        # We cannot use oslo.service to spawn multiple services on Windows.
        # It relies on forking, which is not available on Windows.
        # Furthermore, service objects are unmarshallable objects that are
        # passed to subprocesses.
        _launch_services_win32()
    else:
        _launch_services_posix()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/cmd/volume_usage_audit.py 0000664 0000000 0000000 00000023555 15131732575 0024644 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cron script to generate usage notifications for volumes existing during
the audit period.
Together with the notifications generated by volumes
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate volume usage
for each tenant.
Time periods are specified as 'hour', 'month', 'day' or 'year'
- `hour` - previous hour. If run at 9:07am, will generate usage for
8-9am.
- `month` - previous month. If the script is run April 1, it will
generate usages for March 1 through March 31.
- `day` - previous day. if run on July 4th, it generates usages for
July 3rd.
- `year` - previous year. If run on Jan 1, it generates usages for
Jan 1 through Dec 31 of the previous year.
"""
import datetime
import sys
import iso8601
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n # noqa
i18n.enable_lazy()
from cinder import context
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
import cinder.volume.volume_utils
CONF = cfg.CONF
script_opts = [
cfg.StrOpt('start_time',
help="If this option is specified then the start time "
"specified is used instead of the start time of the "
"last completed audit period."),
cfg.StrOpt('end_time',
help="If this option is specified then the end time "
"specified is used instead of the end time of the "
"last completed audit period."),
cfg.BoolOpt('send_actions',
default=False,
help="Send the volume and snapshot create and delete "
"notifications generated in the specified period."),
]
CONF.register_cli_opts(script_opts)
def _time_error(LOG, begin, end):
    """Resolve the audit window, honoring CLI overrides, and validate it.

    Returns UTC-aware (begin, end); exits the process when end <= begin.
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    if CONF.start_time:
        begin = datetime.datetime.strptime(CONF.start_time, fmt)
    if CONF.end_time:
        end = datetime.datetime.strptime(CONF.end_time, fmt)
    # Normalize both endpoints to timezone-aware UTC datetimes.
    begin = begin.replace(tzinfo=iso8601.UTC)
    end = end.replace(tzinfo=iso8601.UTC)
    if end > begin:
        return begin, end
    msg = _("The end time (%(end)s) must be after the start "
            "time (%(start)s).") % {'start': begin,
                                    'end': end}
    LOG.error(msg)
    sys.exit(-1)
def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
    """Emit a volume 'exists' usage notification.

    Failures are logged and swallowed so one bad volume does not abort
    the whole audit run.
    """
    try:
        # The argument dict already carried the volume/project ids, but the
        # previous format string never referenced them, so the debug line
        # did not identify which volume it was about.  Include them.
        LOG.debug("Send exists notification for "
                  "<volume_id: %(volume_id)s> "
                  "<project_id: %(project_id)s> <%(extra_info)s>",
                  {'volume_id': volume_ref.id,
                   'project_id': volume_ref.project_id,
                   'extra_info': extra_info})
        cinder.volume.volume_utils.notify_about_volume_usage(
            admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
    except Exception as exc_msg:
        LOG.error("Exists volume notification failed: %s",
                  exc_msg, resource=volume_ref)
def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
    """Emit a snapshot 'exists' usage notification.

    Failures are logged and swallowed so one bad snapshot does not abort
    the whole audit run.
    """
    try:
        # The argument dict already carried the snapshot/project ids, but the
        # previous format string never referenced them, so the debug line
        # did not identify which snapshot it was about.  Include them.
        LOG.debug("Send notification for "
                  "<snapshot_id: %(snapshot_id)s> "
                  "<project_id: %(project_id)s> <%(extra_info)s>",
                  {'snapshot_id': snapshot_ref.id,
                   'project_id': snapshot_ref.project_id,
                   'extra_info': extra_info})
        cinder.volume.volume_utils.notify_about_snapshot_usage(
            admin_context, snapshot_ref, 'exists', extra_info)
    except Exception as exc_msg:
        LOG.error("Exists snapshot notification failed: %s",
                  exc_msg, resource=snapshot_ref)
def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
    """Emit a backup 'exists' usage notification.

    Failures are logged and swallowed so one bad backup does not abort
    the whole audit run.
    """
    try:
        cinder.volume.volume_utils.notify_about_backup_usage(
            admin_context, backup_ref, 'exists', extra_info)
        # Identify the backup in the debug line; the previous format string
        # ignored the ids that were passed in the argument dict.
        LOG.debug("Sent notification for "
                  "<backup_id: %(backup_id)s> "
                  "<project_id: %(project_id)s> <%(extra_info)s>",
                  {'backup_id': backup_ref.id,
                   'project_id': backup_ref.project_id,
                   'extra_info': extra_info})
    except Exception as exc_msg:
        LOG.error("Exists backups notification failed: %s", exc_msg)
def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.created_at),
'audit_period_ending': str(obj_ref.created_at),
}
LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> "
" <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'create.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Create %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.deleted_at),
'audit_period_ending': str(obj_ref.deleted_at),
}
LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> "
" <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'delete.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context,
                    begin, end, notify_about_usage, type_id_str, type_name):
    """Emit the 'exists' notification, plus create/delete events if enabled.

    Create/delete events are only replayed when --send_actions was given
    and the corresponding timestamp falls inside the audit window.
    """
    _notify_usage(LOG, obj_ref, extra_info, admin_context)
    if not CONF.send_actions:
        return
    if begin < obj_ref.created_at < end:
        _create_action(obj_ref, admin_context, LOG,
                       notify_about_usage, type_id_str, type_name)
    if obj_ref.deleted_at and begin < obj_ref.deleted_at < end:
        _delete_action(obj_ref, admin_context, LOG,
                       notify_about_usage, type_id_str, type_name)
def main():
    """Generate usage notifications for the last completed audit period."""
    objects.register_all()
    admin_context = context.get_admin_context()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    LOG = logging.getLogger("cinder")
    rpc.init(CONF)
    # CLI --start_time/--end_time can override the computed audit window.
    begin, end = utils.last_completed_audit_period()
    begin, end = _time_error(LOG, begin, end)
    LOG.info("Starting volume usage audit")
    LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
             {"begin_period": begin, "end_period": end})
    extra_info = {
        'audit_period_beginning': str(begin),
        'audit_period_ending': str(end),
    }
    # Volumes, snapshots and backups are each audited with the same
    # helper, parameterized by their usage-notification function.
    volumes = objects.VolumeList.get_all_active_by_window(admin_context,
                                                          begin,
                                                          end)
    LOG.info("Found %d volumes", len(volumes))
    for volume_ref in volumes:
        _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
                        admin_context, begin, end,
                        cinder.volume.volume_utils.notify_about_volume_usage,
                        "volume_id", "volume")
    snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
                                                              begin, end)
    LOG.info("Found %d snapshots", len(snapshots))
    for snapshot_ref in snapshots:
        _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
                        admin_context, begin, end,
                        cinder.volume.volume_utils.notify_about_snapshot_usage,
                        "snapshot_id", "snapshot")
    backups = objects.BackupList.get_all_active_by_window(admin_context,
                                                          begin, end)
    LOG.info("Found %d backups", len(backups))
    for backup_ref in backups:
        _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
                        admin_context, begin, end,
                        cinder.volume.volume_utils.notify_about_backup_usage,
                        "backup_id", "backup")
    LOG.info("Volume usage audit completed")
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/common/ 0000775 0000000 0000000 00000000000 15131732575 0021124 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/common/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0023223 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/common/config.py 0000664 0000000 0000000 00000025332 15131732575 0022750 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 NTT corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import cors
from oslo_policy import opts as policy_opts
from oslo_utils import netutils
CONF = cfg.CONF
logging.register_options(CONF)
core_opts = [
cfg.StrOpt('state_path',
default='/var/lib/cinder',
help="Top-level directory for maintaining cinder's state"), ]
CONF.register_cli_opts(core_opts)
api_opts = [
cfg.BoolOpt('api_rate_limit',
default=True,
help='Enables or disables rate limit of the API.'),
cfg.StrOpt('group_api_class',
default='cinder.group.api.API',
help='The full class name of the group API class'),
cfg.ListOpt('osapi_volume_ext_list',
default=[],
help='Specify list of extensions to load when using osapi_'
'volume_extension option with cinder.api.contrib.'
'select_extensions'),
cfg.MultiStrOpt('osapi_volume_extension',
default=['cinder.api.contrib.standard_extensions'],
help='osapi volume extension to load'),
cfg.StrOpt('volume_api_class',
default='cinder.volume.api.API',
help='The full class name of the volume API class to use'),
]
global_opts = [
cfg.HostAddressOpt('my_ip',
sample_default='',
default=netutils.get_my_ipv4(),
help='IP address of this host'),
cfg.StrOpt('volume_manager',
default='cinder.volume.manager.VolumeManager',
help='Full class name for the Manager for volume'),
cfg.StrOpt('scheduler_manager',
default='cinder.scheduler.manager.SchedulerManager',
help='Full class name for the Manager for scheduler'),
cfg.StrOpt('host',
sample_default='localhost',
default=socket.gethostname(),
help='Name of this node. This can be an opaque '
'identifier. It is not necessarily a host name, '
'FQDN, or IP address.'),
# NOTE(vish): default to nova for compatibility with nova installs
cfg.StrOpt('storage_availability_zone',
default='nova',
help='Availability zone of this node. Can be overridden per '
'volume backend with the option '
'"backend_availability_zone".'),
cfg.StrOpt('default_availability_zone',
help='Default availability zone for new volumes. If not set, '
'the storage_availability_zone option value is used as '
'the default for new volumes.'),
cfg.BoolOpt('allow_availability_zone_fallback',
default=False,
help='If the requested Cinder availability zone is '
'unavailable, fall back to the value of '
'default_availability_zone, then '
'storage_availability_zone, instead of failing.'),
cfg.StrOpt('default_volume_type',
default='__DEFAULT__',
required=True,
help='Default volume type to use'),
cfg.StrOpt('default_group_type',
help='Default group type to use'),
cfg.StrOpt('volume_usage_audit_period',
default='month',
help='Time period for which to generate volume usages. '
'The options are hour, day, month, or year.'),
cfg.StrOpt('rootwrap_config',
default='/etc/cinder/rootwrap.conf',
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.BoolOpt('monkey_patch',
default=False,
help='Enable monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[],
help='List of modules/decorators to monkey patch'),
cfg.IntOpt('service_down_time',
default=60,
help='Maximum time since last check-in for a service to be '
'considered up'),
cfg.ListOpt('enabled_backends',
help='A list of backend names to use. These backend names '
'should be backed by a unique [CONFIG] group '
'with its options'),
cfg.BoolOpt('no_snapshot_gb_quota',
default=False,
help="Whether snapshots sizes count against global and per "
"volume type gigabyte quotas. By default snapshots' "
"sizes are counted."),
cfg.StrOpt('transfer_api_class',
default='cinder.transfer.api.API',
help='The full class name of the volume transfer API class'),
cfg.StrOpt('consistencygroup_api_class',
default='cinder.consistencygroup.api.API',
help='The full class name of the consistencygroup API class'),
cfg.BoolOpt('split_loggers',
default=False,
help='Log requests to multiple loggers.')
]
auth_opts = [
cfg.StrOpt('auth_strategy',
default='keystone',
choices=[('noauth', 'Do not perform authentication'),
('noauth_include_project_id',
'Do not perform authentication, and include a'
' project_id in API URLs'),
('keystone', 'Authenticate using keystone')],
help='The strategy to use for auth. Supports noauth,'
' noauth_include_project_id or keystone.'),
]
backup_opts = [
cfg.StrOpt('backup_api_class',
default='cinder.backup.api.API',
help='The full class name of the volume backup API class'),
cfg.StrOpt('backup_manager',
default='cinder.backup.manager.BackupManager',
help='Full class name for the Manager for volume backup'),
]
image_opts = [
cfg.ListOpt('glance_api_servers',
default=None,
help='A list of the URLs of glance API servers available to '
'cinder ([http[s]://][hostname|ip]:port). If protocol '
'is not specified it defaults to http.'),
cfg.IntOpt('glance_num_retries',
min=0,
default=3,
help='Number retries when downloading an image from glance'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance (https will be used but cert validation will '
'not be performed).'),
cfg.BoolOpt('glance_api_ssl_compression',
default=False,
help='Enables or disables negotiation of SSL layer '
'compression. In some cases disabling compression '
'can improve data throughput, such as when high '
'network bandwidth is available and you use '
'compressed image formats like qcow2.'),
cfg.StrOpt('glance_ca_certificates_file',
help='Location of ca certificates file to use for glance '
'client requests.'),
cfg.StrOpt('glance_certfile',
help='Location of certificate file to use for glance '
'client requests.'),
cfg.StrOpt('glance_keyfile',
help='Location of certificate key file to use for glance '
'client requests.'),
cfg.IntOpt('glance_request_timeout',
help='http/https timeout value for glance operations. If no '
'value (None) is supplied here, the glanceclient default '
'value is used.'),
]
compression_opts = [
cfg.StrOpt('compression_format',
default='gzip',
choices=[('gzip', 'GNUzip format')],
help='Image compression format on image upload'),
cfg.BoolOpt('allow_compression_on_image_upload',
default=False,
help='The strategy to use for image compression on upload. '
'Default is disallow compression.'),
]
CONF.register_opts(api_opts)
CONF.register_opts(core_opts)
CONF.register_opts(auth_opts)
CONF.register_opts(backup_opts)
CONF.register_opts(image_opts)
CONF.register_opts(global_opts)
CONF.register_opts(compression_opts)
def set_middleware_defaults():
    """Update default configuration options for oslo.middleware."""
    allow_headers = ['X-Auth-Token',
                     'X-Identity-Status',
                     'X-Roles',
                     'X-Service-Catalog',
                     'X-User-Id',
                     'X-Tenant-Id',
                     'X-OpenStack-Request-ID',
                     'X-Trace-Info',
                     'X-Trace-HMAC',
                     'OpenStack-API-Version']
    expose_headers = ['X-Auth-Token',
                      'X-Subject-Token',
                      'X-Service-Token',
                      'X-OpenStack-Request-ID',
                      'OpenStack-API-Version']
    allow_methods = ['GET',
                     'PUT',
                     'POST',
                     'DELETE',
                     'PATCH',
                     'HEAD']
    cors.set_defaults(allow_headers=allow_headers,
                      expose_headers=expose_headers,
                      allow_methods=allow_methods)
def set_external_library_defaults():
    """Set default configuration options for external openstack libraries.

    Applies Cinder's preferred defaults for oslo.middleware (CORS) and
    oslo.policy.
    """
    # This function is required so that our settings will override the defaults
    # set by the libraries when the Cinder config files are generated. This
    # function is declared as an entry point for oslo.config.opts.defaults in
    # setup.cfg.
    set_middleware_defaults()
    policy_opts.set_defaults(CONF,
                             enforce_scope=False,
                             enforce_new_defaults=False)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/common/constants.py 0000664 0000000 0000000 00000003675 15131732575 0023525 0 ustar 00root root 0000000 0000000 # Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The maximum value a signed INT type may have
DB_MAX_INT = 0x7FFFFFFF
# The cinder services binaries and topics' names
API_BINARY = "cinder-api"
SCHEDULER_BINARY = "cinder-scheduler"
VOLUME_BINARY = "cinder-volume"
BACKUP_BINARY = "cinder-backup"
# RPC topic names mirror the corresponding service binary names.
SCHEDULER_TOPIC = SCHEDULER_BINARY
VOLUME_TOPIC = VOLUME_BINARY
BACKUP_TOPIC = BACKUP_BINARY
LOG_BINARIES = (SCHEDULER_BINARY, VOLUME_BINARY, BACKUP_BINARY, API_BINARY)
# The encryption key ID used by the legacy fixed-key ConfKeyMgr
FIXED_KEY_ID = '00000000-0000-0000-0000-000000000000'
# Storage protocol constants
CEPH = 'ceph'
DRBD = 'DRBD'
FC = 'FC'
FC_VARIANT_1 = 'fibre_channel'
FC_VARIANT_2 = 'fc'
FILE = 'file'
ISCSI = 'iSCSI'
ISCSI_VARIANT = 'iscsi'
ISER = 'iSER'
LIGHTOS = 'lightos'
NFS = 'NFS'
NFS_VARIANT = 'nfs'
NVMEOF = 'NVMe-oF'
NVMEOF_VARIANT_1 = 'NVMeOF'
NVMEOF_VARIANT_2 = 'nvmeof'
NVMEOF_ROCE = 'NVMe-RoCE'
NVMEOF_FC = 'NVMe-FC'
NVMEOF_TCP = 'NVMe-TCP'
SCALEIO = 'scaleio'
SCSI = 'SCSI'
STORPOOL = 'storpool'
VMDK = 'vmdk'
VSTORAGE = 'vstorageobject'
# Groupings of the alternate spellings drivers may report for a protocol.
# These must be strings, because there are places that check specific type
ISCSI_VARIANTS = [ISCSI, ISCSI_VARIANT]
FC_VARIANTS = [FC, FC_VARIANT_1, FC_VARIANT_2]
NFS_VARIANTS = [NFS, NFS_VARIANT]
NVMEOF_VARIANTS = [NVMEOF, NVMEOF_VARIANT_1, NVMEOF_VARIANT_2]
CACHEABLE_PROTOCOLS = FC_VARIANTS + ISCSI_VARIANTS + NVMEOF_VARIANTS
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/common/sqlalchemyutils.py 0000664 0000000 0000000 00000015352 15131732575 0024727 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of paginate query."""
import datetime
from oslo_log import log as logging
import sqlalchemy
import sqlalchemy.sql as sa_sql
from sqlalchemy.sql import type_api
from cinder.db import api
from cinder import exception
from cinder.i18n import _
LOG = logging.getLogger(__name__)
_TYPE_SCHEMA = {
'datetime': datetime.datetime(1900, 1, 1),
'big_integer': 0,
'integer': 0,
'string': '',
'boolean': False,
}
def _get_default_column_value(model, column_name):
    """Return a non-None default value for the given model column.

    In postgreDB case, if no right default values are being set, an
    psycopg2.DataError will be thrown.
    """
    attr = getattr(model, column_name)
    # Return the default value directly if the model contains. Otherwise return
    # a default value which is not None.
    # NOTE(review): a column default is normally a ColumnDefault object, not a
    # TypeEngine, so this isinstance() check looks like it may never match --
    # confirm against the SQLAlchemy version in use.
    if attr.default and isinstance(attr.default, type_api.TypeEngine):
        return attr.default.arg
    attr_type = attr.type
    # Fall back to a type-appropriate non-None sentinel from _TYPE_SCHEMA.
    return _TYPE_SCHEMA[attr_type.__visit_name__]
# TODO(wangxiyuan): Use oslo_db.sqlalchemy.utils.paginate_query once it is
# stable and afforded by the minimum version in requirement.txt.
# copied from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None, offset=None):
    """Returns a query with sorting / pagination criteria added.
    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
    We also have to cope with different sort_directions.
    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.
    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we returns the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
    :param offset: the number of items to skip from the marker or from the
                   first element.
    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning('Id not in sort_keys; is sort_keys unique?')
    if sort_dir and sort_dirs:
        raise AssertionError('Both sort_dir and sort_dirs specified.')
    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'
    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]
    if len(sort_dirs) != len(sort_keys):
        raise AssertionError(
            'sort_dirs length is not equal to sort_keys length.')
    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]
        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise exception.InvalidInput(reason='Invalid sort key')
        # Reject keys that do not map to actual ORM columns.
        if not api.is_orm_value(sort_key_attr):
            raise exception.InvalidInput(reason='Invalid sort key')
        query = query.order_by(sort_dir_func(sort_key_attr))
    # Add pagination
    if marker is not None:
        # Collect the marker's value for every sort key, substituting a
        # non-None default so SQL comparisons below are total.
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            if v is None:
                v = _get_default_column_value(model, sort_key)
            marker_values.append(v)
        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            # Equality constraints on all higher-priority keys k0..k(i-1),
            # with NULL columns coalesced to their default via CASE.
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                default = _get_default_column_value(model, sort_keys[j])
                attr = sa_sql.expression.case(
                    *[(model_attr.isnot(None), model_attr)],
                    else_=default,
                )
                crit_attrs.append((attr == marker_values[j]))
            model_attr = getattr(model, sort_keys[i])
            default = _get_default_column_value(model, sort_keys[i])
            attr = sa_sql.expression.case(
                *[(model_attr.isnot(None), model_attr)],
                else_=default,
            )
            # Compare booleans as ints so the </> comparisons below work.
            if isinstance(model_attr.type, sqlalchemy.Boolean):
                marker_values[i] = int(marker_values[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((attr > marker_values[i]))
            else:
                raise ValueError(_("Unknown sort direction, "
                                   "must be 'desc' or 'asc'"))
            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)
        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)
    if limit is not None:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)
    return query
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/compute/ 0000775 0000000 0000000 00000000000 15131732575 0021310 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/compute/__init__.py 0000664 0000000 0000000 00000002072 15131732575 0023422 0 ustar 00root root 0000000 0000000 # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
compute_opts = [
cfg.StrOpt('compute_api_class',
default='cinder.compute.nova.API',
help='The full class name of the '
'compute API class to use'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
def API():
    """Instantiate the configured compute API implementation."""
    cls = importutils.import_class(CONF.compute_api_class)
    return cls()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/compute/nova.py 0000664 0000000 0000000 00000022307 15131732575 0022631 0 ustar 00root root 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests to Nova.
"""
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import identity
from keystoneauth1 import loading as ks_loading
from novaclient import api_versions
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from requests import exceptions as request_exceptions
from cinder.db import base
from cinder import exception
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import service_auth
nova_opts = [
cfg.StrOpt('region_name',
help='Name of nova region to use. Useful if keystone manages '
'more than one region.'),
cfg.StrOpt('interface',
default='public',
choices=['public', 'admin', 'internal'],
help='Type of the nova endpoint to use. This endpoint will '
'be looked up in the keystone catalog and should be '
'one of public, internal or admin.'),
cfg.StrOpt('token_auth_url',
help='The authentication URL for the nova connection when '
'using the current user''s token'),
]
NOVA_GROUP = 'nova'
CONF = cfg.CONF
nova_session_opts = ks_loading.get_session_conf_options()
nova_auth_opts = ks_loading.get_auth_common_conf_options()
CONF.register_opts(nova_opts, group=NOVA_GROUP)
CONF.register_opts(nova_session_opts, group=NOVA_GROUP)
CONF.register_opts(nova_auth_opts, group=NOVA_GROUP)
LOG = logging.getLogger(__name__)
NOVA_API_VERSION = "2.1"
nova_extensions = [ext for ext in
nova_client.discover_extensions(NOVA_API_VERSION)
if ext.name in ("assisted_volume_snapshots",
"list_extensions",
"server_external_events")]
def _get_identity_endpoint_from_sc(context):
    """Look up the identity endpoint URL in the request's service catalog.

    Honors the configured [nova] region_name when set; raises
    EndpointNotFound when no matching endpoint exists.
    """
    conf = CONF[NOVA_GROUP]
    identity_services = (svc for svc in context.service_catalog
                         if svc.get('type') == 'identity')
    for svc in identity_services:
        for endpoint in svc['endpoints']:
            region_ok = (not conf.region_name or
                         endpoint.get('region') == conf.region_name)
            if region_ok:
                return endpoint.get(conf.interface + 'URL')
    raise ks_exc.EndpointNotFound()
def novaclient(context, privileged_user=False, timeout=None, api_version=None):
    """Returns a Nova client
    @param privileged_user:
        If True, use the account from configuration
        (requires 'auth_type' and the other usual Keystone authentication
        options to be set in the [nova] section)
    @param timeout:
        Number of seconds to wait for an answer before raising a
        Timeout exception (None to disable)
    @param api_version:
        api version of nova
    """
    if privileged_user and CONF[NOVA_GROUP].auth_type:
        LOG.debug('Creating Keystone auth plugin from conf')
        n_auth = ks_loading.load_auth_from_conf_options(CONF, NOVA_GROUP)
    else:
        # Authenticate with the caller's own token; prefer the configured
        # token_auth_url, falling back to the identity endpoint found in
        # the request's service catalog.
        if CONF[NOVA_GROUP].token_auth_url:
            url = CONF[NOVA_GROUP].token_auth_url
        else:
            url = _get_identity_endpoint_from_sc(context)
        LOG.debug('Creating Keystone token plugin using URL: %s', url)
        n_auth = identity.Token(auth_url=url,
                                token=context.auth_token,
                                project_name=context.project_name,
                                project_domain_id=context.project_domain_id)
    if CONF.auth_strategy == 'keystone':
        # Wrap the plugin so service-token auth can be applied when enabled.
        n_auth = service_auth.get_auth_plugin(context, auth=n_auth)
    keystone_session = ks_loading.load_session_from_conf_options(
        CONF,
        NOVA_GROUP,
        auth=n_auth)
    c = nova_client.Client(
        api_versions.APIVersion(api_version or NOVA_API_VERSION),
        session=keystone_session,
        insecure=CONF[NOVA_GROUP].insecure,
        timeout=timeout,
        region_name=CONF[NOVA_GROUP].region_name,
        endpoint_type=CONF[NOVA_GROUP].interface,
        cacert=CONF[NOVA_GROUP].cafile,
        global_request_id=context.global_id,
        extensions=nova_extensions)
    return c
class API(base.Base):
"""API for interacting with novaclient."""
NotFound = nova_exceptions.NotFound
    def __init__(self):
        """Initialize the wrapper and its user-message API helper."""
        self.message_api = message_api.API()
        super().__init__()
def _get_volume_extended_event(self, server_id, volume_id):
return {'name': 'volume-extended',
'server_uuid': server_id,
'tag': volume_id}
def _get_volume_reimaged_event(self, server_id, volume_id):
return {'name': 'volume-reimaged',
'server_uuid': server_id,
'tag': volume_id}
def _send_events(self, context, events, api_version=None):
nova = novaclient(context, privileged_user=True,
api_version=api_version)
try:
response = nova.server_external_events.create(events)
except nova_exceptions.NotFound:
LOG.warning('Nova returned NotFound for events: %s.', events)
return False
except Exception:
LOG.exception('Failed to notify nova on events: %s.', events)
return False
else:
if not isinstance(response, list):
LOG.error('Error response returned from nova: %s.', response)
return False
response_error = False
for event in response:
code = event.get('code')
if code is None:
response_error = True
continue
if code != 200:
LOG.warning(
'Nova event: %s returned with failed status.', event)
else:
LOG.info('Nova event response: %s.', event)
if response_error:
LOG.error('Error response returned from nova: %s.', response)
return False
return True
def update_server_volume(self, context, server_id, src_volid,
new_volume_id):
nova = novaclient(context, privileged_user=True)
nova.volumes.update_server_volume(server_id,
src_volid,
new_volume_id)
def create_volume_snapshot(self, context, volume_id, create_info):
nova = novaclient(context, privileged_user=True)
# pylint: disable=E1101
nova.assisted_volume_snapshots.create(
volume_id,
create_info=create_info)
def delete_volume_snapshot(self, context, snapshot_id, delete_info):
nova = novaclient(context, privileged_user=True)
# pylint: disable=E1101
nova.assisted_volume_snapshots.delete(
snapshot_id,
delete_info=delete_info)
def get_server(self, context, server_id, privileged_user=False,
timeout=None):
try:
return novaclient(context, privileged_user=privileged_user,
timeout=timeout).servers.get(server_id)
except nova_exceptions.NotFound:
raise exception.ServerNotFound(uuid=server_id)
except request_exceptions.Timeout:
raise exception.APITimeout(service='Nova')
def extend_volume(self, context, server_ids, volume_id):
api_version = '2.51'
events = [self._get_volume_extended_event(server_id, volume_id)
for server_id in server_ids]
result = self._send_events(context, events, api_version=api_version)
if not result:
self.message_api.create(
context,
message_field.Action.EXTEND_VOLUME,
resource_uuid=volume_id,
detail=message_field.Detail.NOTIFY_COMPUTE_SERVICE_FAILED)
return result
def reimage_volume(self, context, server_ids, volume_id):
api_version = '2.93'
events = [self._get_volume_reimaged_event(server_id, volume_id)
for server_id in server_ids]
result = self._send_events(context, events, api_version=api_version)
if not result:
self.message_api.create(
context,
message_field.Action.REIMAGE_VOLUME,
resource_uuid=volume_id,
detail=message_field.Detail.REIMAGE_VOLUME_FAILED)
return result
@staticmethod
def get_server_volume(context, server_id, volume_id):
# Use microversion that includes attachment_id
nova = novaclient(context, api_version='2.89')
return nova.volumes.get_server_volume(server_id, volume_id)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/context.py 0000664 0000000 0000000 00000030571 15131732575 0021700 0 ustar 00root root 0000000 0000000 # Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of cinder."""
import copy
from typing import Any, Optional
from keystoneauth1.access import service_catalog as ksa_service_catalog
from keystoneauth1 import plugin
from oslo_config import cfg
from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import timeutils
from cinder import exception
from cinder.i18n import _
from cinder.objects import base as objects_base
from cinder import policy
# Configuration for the Cinder "internal tenant": a project/user pair used
# for volume operations Cinder performs on its own behalf (see
# get_internal_tenant_context below).
context_opts = [
    cfg.StrOpt('cinder_internal_tenant_project_id',
               help='ID of the project which will be used as the Cinder '
                    'internal tenant.'),
    cfg.StrOpt('cinder_internal_tenant_user_id',
               help='ID of the user to be used in volume operations as the '
                    'Cinder internal tenant.'),
]

CONF = cfg.CONF
CONF.register_opts(context_opts)

LOG = logging.getLogger(__name__)
class _ContextAuthPlugin(plugin.BaseAuthPlugin):
    """A keystoneauth auth plugin built from values stored on the Context.

    Ideally we would use the plugin provided by auth_token middleware however
    this plugin isn't serialized yet so we construct one from the serialized
    auth data.
    """

    def __init__(self, auth_token, sc):
        super(_ContextAuthPlugin, self).__init__()
        self.auth_token = auth_token
        # Wrap the raw (serialized) catalog for endpoint lookups.
        self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc)

    def get_token(self, *args, **kwargs):
        return self.auth_token

    def get_endpoint(self, session, service_type=None, interface=None,
                     region_name=None, service_name=None, **kwargs):
        # Resolve the endpoint from the stored catalog instead of querying
        # Keystone again.
        lookup = {
            'service_type': service_type,
            'service_name': service_name,
            'interface': interface,
            'region_name': region_name,
        }
        return self.service_catalog.url_for(**lookup)
@enginefacade.transaction_context_provider
class RequestContext(context.RequestContext):
    """Security context and request information.

    Represents the user taking a given action within the system.
    """

    def __init__(self,
                 user_id: Optional[str] = None,
                 project_id: Optional[str] = None,
                 is_admin: Optional[bool] = None,
                 read_deleted: Optional[str] = "no",
                 project_name: Optional[str] = None,
                 remote_address: Optional[str] = None,
                 timestamp=None,
                 quota_class=None,
                 service_catalog: Optional[dict] = None,
                 user_auth_plugin=None,
                 message_resource_id=None,
                 message_resource_type=None,
                 message_action=None,
                 **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.
        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        """
        # NOTE(smcginnis): To keep it compatible for code using positional
        # args, explicitly set user_id and project_id in kwargs.
        kwargs.setdefault('user_id', user_id)
        kwargs.setdefault('project_id', project_id)

        super(RequestContext, self).__init__(is_admin=is_admin, **kwargs)

        self.project_name = project_name
        self.read_deleted = read_deleted
        self.remote_address = remote_address

        # Accept a missing timestamp (use "now") or an ISO 8601 string
        # (e.g. when deserialized via from_dict).
        if not timestamp:
            timestamp = timeutils.utcnow()
        elif isinstance(timestamp, str):
            timestamp = timeutils.parse_isotime(timestamp)
        self.timestamp = timestamp
        self.quota_class = quota_class
        self.message_resource_id = message_resource_id
        self.message_resource_type = message_resource_type
        self.message_action = message_action

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in
                                    ('identity', 'compute', 'object-store',
                                     'image', 'key-manager')]
        else:
            # if list is empty or none
            self.service_catalog = []

        # We need to have RequestContext attributes defined
        # when policy.check_is_admin invokes request logging
        # to make it loggable.
        self.is_admin: Optional[bool]
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')

        self.user_auth_plugin = user_auth_plugin

    def get_auth_plugin(self):
        """Return the keystoneauth plugin for this context's credentials."""
        if self.user_auth_plugin:
            return self.user_auth_plugin
        else:
            return _ContextAuthPlugin(self.auth_token, self.service_catalog)

    def _get_read_deleted(self) -> str:
        return self._read_deleted

    def _set_read_deleted(self, read_deleted: str) -> None:
        # Fail fast on anything other than the three supported modes.
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self) -> None:
        del self._read_deleted

    # Expose read_deleted as a validated property.
    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def to_dict(self) -> dict[str, Any]:
        """Serialize this context (e.g. for passing over RPC)."""
        result = super(RequestContext, self).to_dict()
        result['user_id'] = self.user_id
        result['project_id'] = self.project_id
        result['project_name'] = self.project_name
        result['domain_id'] = self.domain_id
        result['read_deleted'] = self.read_deleted
        result['remote_address'] = self.remote_address
        result['timestamp'] = self.timestamp.isoformat()
        result['quota_class'] = self.quota_class
        result['service_catalog'] = self.service_catalog
        result['request_id'] = self.request_id
        result['message_resource_id'] = self.message_resource_id
        result['message_resource_type'] = self.message_resource_type
        result['message_action'] = self.message_action
        return result

    @classmethod
    def from_dict(cls, values: dict) -> 'RequestContext':
        """Rebuild a context from the output of :meth:`to_dict`."""
        return cls(user_id=values.get('user_id'),
                   project_id=values.get('project_id'),
                   project_name=values.get('project_name'),
                   domain_id=values.get('domain_id'),
                   read_deleted=values.get('read_deleted', 'no'),
                   remote_address=values.get('remote_address'),
                   timestamp=values.get('timestamp'),
                   quota_class=values.get('quota_class'),
                   service_catalog=values.get('service_catalog'),
                   request_id=values.get('request_id'),
                   global_request_id=values.get('global_request_id'),
                   is_admin=values.get('is_admin'),
                   roles=values.get('roles'),
                   auth_token=values.get('auth_token'),
                   user_domain_id=values.get('user_domain_id'),
                   project_domain_id=values.get('project_domain_id'),
                   message_resource_id=values.get('message_resource_id'),
                   message_resource_type=values.get('message_resource_type'),
                   message_action=values.get('message_action'))

    def authorize(self,
                  action: str,
                  target: Optional[dict] = None,
                  target_obj: Optional[dict] = None,
                  fatal: bool = True):
        """Verify that the given action is valid on the target in this context.

        :param action: string representing the action to be checked.
        :param target: dictionary representing the object of the action
            for object creation this should be a dictionary representing the
            location of the object e.g. ``{'project_id': context.project_id}``.
            If None, then this default target will be considered:
            {'project_id': self.project_id, 'user_id': self.user_id}
        :param target_obj: dictionary representing the object which will be
            used to update target.
        :param fatal: if False, will return False when an
            exception.PolicyNotAuthorized occurs.
        :raises cinder.exception.NotAuthorized: if verification fails and
            fatal is True.
        :return: returns a non-False value (not necessarily "True") if
            authorized and False if not authorized and fatal is False.
        """
        if target is None:
            target = {'project_id': self.project_id,
                      'user_id': self.user_id}
        if isinstance(target_obj, objects_base.CinderObject):
            # Turn object into dict so target.update can work
            target.update(
                target_obj.obj_to_primitive()['versioned_object.data'] or {})
            # Ensure 'project_id' and 'user_id' attributes are captured.
            # Some objects (e.g. attachments) have a project_id attribute
            # that isn't present in the dict. The try/except wrappers avoid
            # lazy-load issues when the attribute doesn't exist.
            try:
                target['project_id'] = target_obj.project_id
            except Exception:
                pass
            try:
                target['user_id'] = target_obj.user_id
            except Exception:
                pass
        else:
            target.update(target_obj or {})

        return policy.authorize(self, action, target, do_raise=fatal,
                                exc=exception.PolicyNotAuthorized)

    def to_policy_values(self) -> dict:
        """Return the credential dict used for policy checks."""
        # NOTE: use a local name that doesn't shadow the 'policy' module.
        values = super(RequestContext, self).to_policy_values()
        values['is_admin'] = self.is_admin
        return values

    def elevated(self,
                 read_deleted: Optional[str] = None,
                 overwrite: bool = False) -> 'RequestContext':
        """Return a version of this context with admin flag set."""
        # NOTE: use a local name that doesn't shadow the 'context' module.
        ctx = self.deepcopy()
        ctx.is_admin = True

        if 'admin' not in ctx.roles:
            ctx.roles.append('admin')

        if read_deleted is not None:
            ctx.read_deleted = read_deleted

        return ctx

    def deepcopy(self) -> 'RequestContext':
        """Return an independent deep copy of this context."""
        return copy.deepcopy(self)
def get_admin_context(read_deleted: Optional[str] = "no") -> RequestContext:
    """Create an administrative context with no associated user or project."""
    return RequestContext(
        user_id=None,
        project_id=None,
        is_admin=True,
        read_deleted=read_deleted,
        overwrite=False,
    )
def get_internal_tenant_context() -> Optional[RequestContext]:
    """Build and return the Cinder internal tenant context object.

    This request context will only work for internal Cinder operations. It
    will not be able to make requests to remote services. To do so it will
    need to use the keystone client to get an auth_token.

    Returns None (and logs a warning) when the internal tenant options are
    not configured.
    """
    project_id = CONF.cinder_internal_tenant_project_id
    user_id = CONF.cinder_internal_tenant_user_id

    # Both options must be configured for the internal tenant to exist.
    if not (project_id and user_id):
        LOG.warning('Unable to get internal tenant context: Missing '
                    'required config parameters.')
        return None

    return RequestContext(user_id=user_id,
                          project_id=project_id,
                          is_admin=True,
                          overwrite=False)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/coordination.py 0000664 0000000 0000000 00000020062 15131732575 0022676 0 ustar 00root root 0000000 0000000 # Copyright 2015 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Coordination and locking utilities."""
import errno
import glob
import inspect
import os
import re
import sys
from typing import Callable, Optional
import uuid
import decorator
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from tooz import coordination
from cinder import exception
from cinder.i18n import _
from cinder import utils
LOG = log.getLogger(__name__)
# Options controlling which tooz backend is used for distributed locks.
# The default is file-based locking under the service's state_path.
coordination_opts = [
    cfg.StrOpt('backend_url',
               secret=True,
               default='file://$state_path',
               help='The backend URL to use for distributed coordination.'),
]

CONF = cfg.CONF
CONF.register_opts(coordination_opts, group='coordination')
class Coordinator(object):
    """Tooz coordination wrapper.

    The member id registered with the coordination backend is the
    concatenation of the `prefix` and `agent_id` parameters.

    :param str agent_id: Agent identifier
    :param str prefix: Used to provide member identifier with a
                       meaningful prefix.
    """

    def __init__(self, agent_id: Optional[str] = None, prefix: str = ''):
        self.agent_id = agent_id or str(uuid.uuid4())
        self.prefix = prefix
        self.coordinator = None
        self.started = False
        self._file_path = None

    def _get_file_path(self, backend_url):
        # Only the file backend leaves lock files around that we may need
        # to clean up; other backends have no local path.
        if not backend_url.startswith('file://'):
            return None
        path = backend_url[len('file://'):]
        # Copied from TooZ's _normalize_path to get the same path they use
        if sys.platform == 'win32':
            path = re.sub(r'\\(?=\w:\\)', '', os.path.normpath(path))
        return os.path.abspath(os.path.join(path, self.prefix))

    def start(self) -> None:
        """Connect to the coordination backend and start the heartbeat."""
        if self.started:
            return

        backend_url = cfg.CONF.coordination.backend_url

        # NOTE(bluex): Tooz expects member_id as a byte string.
        node_id = (self.prefix + self.agent_id).encode('ascii')
        self.coordinator = coordination.get_coordinator(backend_url, node_id)
        assert self.coordinator is not None
        self.coordinator.start(start_heart=True)
        self._file_path = self._get_file_path(backend_url)
        self.started = True

    def stop(self) -> None:
        """Disconnect from coordination backend and stop heartbeat."""
        if not self.started:
            return
        if self.coordinator is not None:
            self.coordinator.stop()
        self.coordinator = None
        self.started = False

    def get_lock(self, name: str):
        """Return a Tooz backend lock.

        :param str name: The lock name that is used to identify it
                         across all nodes.
        """
        if self.coordinator is None:
            raise exception.LockCreationFailed(_('Coordinator uninitialized.'))
        # NOTE(bluex): Tooz expects lock name as a byte string.
        return self.coordinator.get_lock((self.prefix + name).encode('ascii'))

    def remove_lock(self, glob_name):
        """Delete leftover lock files matching *glob_name*.

        Most locks clean up on release, but not the file lock, so we
        manually clean them.
        """
        def _warn(file_name: str, exc: Exception) -> None:
            LOG.warning('Failed to cleanup lock %(name)s: %(exc)s',
                        {'name': file_name, 'exc': exc})

        if not self._file_path:
            return
        for file_name in glob.glob(self._file_path + glob_name):
            try:
                os.remove(file_name)
            except OSError as exc:
                # A file already gone is fine; anything else gets logged.
                if exc.errno != errno.ENOENT:
                    _warn(file_name, exc)
            except Exception as exc:
                _warn(file_name, exc)
# Process-wide coordinator instance; all Cinder locks share the 'cinder-'
# prefix so their backing files can be identified for cleanup.
COORDINATOR = Coordinator(prefix='cinder-')


def synchronized_remove(glob_name, coordinator=COORDINATOR):
    """Remove leftover lock files whose names match *glob_name*."""
    coordinator.remove_lock(glob_name)
def __acquire(lock, blocking, f_name):
    """Acquire *lock* and return the timestamp at which it was acquired."""
    start = timeutils.now()
    name = utils.convert_str(lock.name)
    LOG.debug('Acquiring lock "%s" by "%s"', name, f_name)
    lock.acquire(blocking)
    acquired_at = timeutils.now()
    LOG.debug('Lock "%s" acquired by "%s" :: waited %0.3fs',
              name, f_name, acquired_at - start)
    return acquired_at
def __release(lock, acquired_time, f_name):
    """Release *lock*, logging (but never propagating) release failures."""
    name = utils.convert_str(lock.name)
    try:
        lock.release()
        duration = timeutils.now() - acquired_time
        LOG.debug('Lock "%s" released by "%s" :: held %0.3fs',
                  name, f_name, duration)
    except Exception as e:
        # Swallow the error: a failed release must not mask the decorated
        # function's own result or exception.
        LOG.error('Failed to release lock "%s": %s', name, e)
def synchronized(*lock_names: str,
                 blocking: bool = True,
                 coordinator: Coordinator = COORDINATOR) -> Callable:
    """Synchronization decorator.

    :param str lock_names: Arbitrary number of Lock names.
    :param blocking: If True, blocks until the lock is acquired.
            If False, raises exception when not acquired. Otherwise,
            the value is used as a timeout value and if lock is not acquired
            after this number of seconds exception is raised. This is a
            keyword only argument.
    :param coordinator: Coordinator class to use when creating lock.
        Defaults to the global coordinator. This is a keyword only argument.
    :raises tooz.coordination.LockAcquireFailed: if lock is not acquired

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one process will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    Lock name can be formatted using Python format string syntax::

        @synchronized('{f_name}-{vol.id}-{snap[name]}')
        def foo(self, vol, snap):
           ...

    Multiple locks can be requested simultaneously and the decorator will
    reorder the names by rendered lock names to prevent potential deadlocks.

        @synchronized('{f_name}-{vol.id}-{snap[name]}',
                      '{f_name}-{vol.id}.delete')
        def foo(self, vol, snap):
           ...

    Available field names are: decorated function parameters and
    `f_name` as a decorated function name.
    """
    @decorator.decorator
    def _synchronized(f, *a, **k) -> Callable:
        # Render each lock-name template with the call's actual arguments
        # (plus f_name), so per-resource locks can be built from parameters.
        call_args = inspect.getcallargs(f, *a, **k)
        call_args['f_name'] = f.__name__
        # Prevent deadlocks: de-duplicate the rendered names and sort them
        # so every caller acquires the locks in the same order.
        names = sorted(set([name.format(**call_args) for name in lock_names]))
        locks = [coordinator.get_lock(name) for name in names]
        acquired_times = []
        f_name = f.__name__
        t1 = timeutils.now()
        try:
            if len(locks) > 1:  # Don't pollute logs for single locks
                LOG.debug('Acquiring %s locks by %s', len(locks), f_name)
            for lock in locks:
                acquired_times.append(__acquire(lock, blocking, f_name))
            if len(locks) > 1:
                t = timeutils.now() - t1
                LOG.debug('Acquired %s locks by %s in %0.3fs',
                          len(locks), f_name, t)
            return f(*a, **k)
        finally:
            # Release only the locks actually acquired: zip stops at the
            # shorter acquired_times list if an acquire failed part-way.
            for lock, acquired_time in zip(locks, acquired_times):
                __release(lock, acquired_time, f_name)
    return _synchronized
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/ 0000775 0000000 0000000 00000000000 15131732575 0020221 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/__init__.py 0000664 0000000 0000000 00000001442 15131732575 0022333 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DB abstraction for Cinder
"""
from cinder.db.api import * # noqa
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/alembic.ini 0000664 0000000 0000000 00000004006 15131732575 0022316 0 ustar 00root root 0000000 0000000 # A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = %(here)s/migrations
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to cinder/db/migrations/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat cinder/db/migrations/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = sqlite:///cinder.db
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks=black
# black.type=console_scripts
# black.entrypoint=black
# black.options=-l 79
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/api.py 0000664 0000000 0000000 00000213121 15131732575 0021344 0 ustar 00root root 0000000 0000000 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the cinder.db namespace. Call these
functions from cinder.db namespace, not the cinder.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import options as db_options
from cinder.api import common
from cinder.common import constants
from cinder.i18n import _
# Naming templates and service-pool behavior for DB-backed resources.
db_opts = [
    cfg.BoolOpt('enable_new_services',
                default=True,
                help='Services to be added to the available pool on create'),
    cfg.StrOpt('volume_name_template',
               default='volume-%s',
               help='Template string to be used to generate volume names'),
    cfg.StrOpt('snapshot_name_template',
               default='snapshot-%s',
               help='Template string to be used to generate snapshot names'),
]

backup_opts = [
    cfg.StrOpt('backup_name_template',
               default='backup-%s',
               help='Template string to be used to generate backup names'),
]

CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(backup_opts)
db_options.set_defaults(CONF)

_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}

# Lazily-loaded backend implementation; every public function in this module
# delegates to it.
IMPL = oslo_db_api.DBAPI.from_config(conf=CONF,
                                     backend_mapping=_BACKEND_MAPPING,
                                     lazy=True)

# The maximum value a signed INT type may have
MAX_INT = constants.DB_MAX_INT
###################
def dispose_engine():
    """Force the engine to establish new connections."""
    # FIXME(jdg): When using sqlite if we do the dispose
    # we seem to lose our DB here.  Adding this check
    # means we don't do the dispose, but we keep our sqlite DB
    # This likely isn't the best way to handle this
    if 'sqlite' in IMPL.get_engine().name:
        return
    return IMPL.dispose_engine()
###################
class Condition(object):
    """Plain (non-negated) condition value for conditional_update."""

    def __init__(self, value, field=None):
        self.value = value
        # The field is optional here; it may instead be supplied later,
        # when the filter is actually built.
        self.field = field

    def get_filter(self, model, field=None):
        return IMPL.condition_db_filter(model, self._get_field(field),
                                        self.value)

    def _get_field(self, field=None):
        # A field must have been given either at construction or here.
        resolved = field or self.field
        if not resolved:
            raise ValueError(_('Condition has no field.'))
        return resolved
class Not(Condition):
    """Negated condition value for conditional_update.

    By default NULL values are treated the way Python treats None instead
    of how SQL treats it: with values (1, 2) this evaluates to True for
    both 3 and NULL, whereas plain SQL would only match 3.
    """

    def __init__(self, value, field=None, auto_none=True):
        super(Not, self).__init__(value, field)
        self.auto_none = auto_none

    def get_filter(self, model, field=None):
        # Prefer the backend's dedicated negated filter when it exists.
        negated_filter = getattr(IMPL, 'condition_not_db_filter', None)
        if negated_filter is not None:
            return negated_filter(model, self._get_field(field),
                                  self.value, self.auto_none)
        # Otherwise fall back to negating the plain filter; the returned
        # object must support the ~ operator.
        return ~super(Not, self).get_filter(model, field)
class Case(object):
    """Conditional value selection for conditional_update.

    Holds (condition, value) pairs in `whens`, an optional comparison
    `value`, and an `else_` fallback.
    """

    def __init__(self, whens, value=None, else_=None):
        self.else_ = else_
        self.whens = whens
        self.value = value
###################
def resource_exists(context, model, resource_id):
    """Return whether a row of *model* with the given id exists."""
    return IMPL.resource_exists(context, model, resource_id)
def get_model_for_versioned_object(versioned_object):
    """Return the DB model class that backs the given versioned object."""
    return IMPL.get_model_for_versioned_object(versioned_object)
def get_by_id(context, model, id, *args, **kwargs):
    """Fetch a single row of *model* by primary id."""
    return IMPL.get_by_id(context, model, id, *args, **kwargs)
###################
def is_orm_value(obj):
    """Check if object is an ORM field (rather than a literal value)."""
    return IMPL.is_orm_value(obj)
def conditional_update(
    context,
    model,
    values,
    expected_values,
    filters=None,
    include_deleted='no',
    project_only=False,
    order=None,
):
    """Compare-and-swap conditional update.

    Update will only occur in the DB if conditions are met.

    We have 4 different condition types we can use in expected_values:
     - Equality:  {'status': 'available'}
     - Inequality: {'status': vol_obj.Not('deleting')}
     - In range: {'status': ['available', 'error']
     - Not in range: {'status': vol_obj.Not(['in-use', 'attaching'])

    Method accepts additional filters, which are basically anything that can be
    passed to a sqlalchemy query's filter method, for example:

    .. code-block:: python

     [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)]

    We can select values based on conditions using Case objects in the 'values'
    argument. For example:

    .. code-block:: python

     has_snapshot_filter = sql.exists().where(
         models.Snapshot.volume_id == models.Volume.id
     )
     case_values = db.Case(
         [(has_snapshot_filter, 'has-snapshot')],
         else_='no-snapshot'
     )
     db.conditional_update(
         context,
         models.Volume,
         {'status': case_values},
         {'status': 'available'},
     )

    And we can use DB fields for example to store previous status in the
    corresponding field even though we don't know which value is in the db from
    those we allowed:

    .. code-block:: python

     db.conditional_update(
         context,
         models.Volume,
         {'status': 'deleting', 'previous_status': models.Volume.status},
         {'status': ('available', 'error')},
     )

    :param context: Request context for the update.
    :param model: The DB model class to update rows of.
    :param values: Dictionary of key-values to update in the DB.
    :param expected_values: Dictionary of conditions that must be met for the
        update to be executed.
    :param filters: Iterable with additional filters.
    :param include_deleted: Should the update include deleted items, this is
        equivalent to read_deleted.
    :param project_only: Should the query be limited to context's project.
    :param order: Specific order of fields in which to update the values
    :returns: Boolean indicating whether db rows were updated.
    """
    return IMPL.conditional_update(
        context,
        model,
        values,
        expected_values,
        filters,
        include_deleted,
        project_only,
        order,
    )
###################
def service_destroy(context, service_id):
    """Destroy the service or raise if it does not exist.

    :param service_id: Id of the service to destroy.
    """
    return IMPL.service_destroy(context, service_id)
def service_get(context, service_id=None, backend_match_level=None, **filters):
    """Get a service that matches the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param context: Request context for the query.
    :param service_id: Id of the service.
    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    :raise ServiceNotFound: If service doesn't exist.
    """
    return IMPL.service_get(context, service_id, backend_match_level,
                            **filters)
def service_get_all(context, backend_match_level=None, **filters):
    """Get all services that match the criteria.

    A possible filter is is_up=True and it will filter nodes that are down,
    as well as host_or_cluster, that lets you look for services using both
    of these properties.

    :param context: Request context for the query.
    :param filters: Filters for the query in the form of key/value arguments.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    """
    return IMPL.service_get_all(context, backend_match_level, **filters)
def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)
def service_update(context, service_id, values, retry=True):
    """Set the given properties on an service and update it.

    :param retry: Whether the backend should retry the update on failure.
    Raises NotFound if service does not exist.
    """
    return IMPL.service_update(context, service_id, values, retry)
def service_get_by_uuid(context, service_uuid):
"""Get a service by it's uuid.
Return Service ref or raise if it does not exist.
"""
return IMPL.service_get_by_uuid(context, service_uuid)
###############
def is_backend_frozen(context, host, cluster_name):
    """Report whether the backend given by host/cluster_name is frozen."""
    frozen = IMPL.is_backend_frozen(context, host, cluster_name)
    return frozen
###############
def cluster_get(context, id=None, is_up=None, get_services=False,
                services_summary=False, read_deleted='no',
                name_match_level=None, **filters):
    """Get a cluster that matches the criteria.
    :param id: Id of the cluster.
    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
                             defined in _filter_host method)
    :param filters: Field based filters in the form of key/value.
    :raise ClusterNotFound: If cluster doesn't exist.
    """
    return IMPL.cluster_get(context, id, is_up, get_services, services_summary,
                            read_deleted, name_match_level, **filters)
def cluster_get_all(context, is_up=None, get_services=False,
                    services_summary=False, read_deleted='no',
                    name_match_level=None, **filters):
    """Get all clusters that match the criteria.
    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param name_match_level: 'pool', 'backend', or 'host' for name filter (as
                             defined in _filter_host method)
    :param filters: Field based filters in the form of key/value.
    """
    return IMPL.cluster_get_all(context, is_up, get_services, services_summary,
                                read_deleted, name_match_level, **filters)
def cluster_create(context, values):
    """Create a cluster from the values dictionary."""
    return IMPL.cluster_create(context, values)
def cluster_update(context, cluster_id, values):
    """Set the given properties on a cluster and update it.
    Raises ClusterNotFound if cluster does not exist.
    """
    return IMPL.cluster_update(context, cluster_id, values)
def cluster_destroy(context, cluster_id):
    """Destroy the cluster or raise if it does not exist or has hosts.
    :raise ClusterNotFound: If cluster doesn't exist.
    """
    return IMPL.cluster_destroy(context, cluster_id)
###############
def volume_attach(context, values):
    """Attach a volume."""
    return IMPL.volume_attach(context, values)
def volume_attached(
    context,
    attachment_id,
    instance_uuid,
    host_name,
    mountpoint,
    attach_mode='rw',
    mark_attached=True,
):
    """Ensure that a volume is set as attached."""
    return IMPL.volume_attached(
        context,
        attachment_id,
        instance_uuid,
        host_name,
        mountpoint,
        attach_mode,
        mark_attached,
    )
def volume_create(context, values):
    """Create a volume from the values dictionary."""
    return IMPL.volume_create(context, values)
def volume_data_get_for_host(context, host, count_only=False):
    """Get (volume_count, gigabytes) for the given host.
    :param count_only: if True, only the count is computed — presumably a
                       single value is returned; confirm against IMPL.
    """
    return IMPL.volume_data_get_for_host(context,
                                         host,
                                         count_only)
def volume_data_get_for_project(context, project_id, host=None):
    """Get (volume_count, gigabytes) for project."""
    return IMPL.volume_data_get_for_project(context, project_id, host=host)
def volume_destroy(context, volume_id):
    """Destroy the volume or raise if it does not exist."""
    return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id, attachment_id):
    """Ensure that a volume is set as detached."""
    return IMPL.volume_detached(context, volume_id, attachment_id)
def volume_get(context, volume_id):
    """Get a volume or raise if it does not exist."""
    return IMPL.volume_get(context, volume_id)
def volume_get_all(context, marker=None, limit=None, sort_keys=None,
                   sort_dirs=None, filters=None, offset=None):
    """Get all volumes."""
    return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys,
                               sort_dirs=sort_dirs, filters=filters,
                               offset=offset)
def calculate_resource_count(context, resource_type, filters):
    """Count resources of the given type that match the filters."""
    return IMPL.calculate_resource_count(context, resource_type, filters)
def volume_get_all_by_host(context, host, filters=None):
    """Get all volumes belonging to a host."""
    return IMPL.volume_get_all_by_host(context, host, filters=filters)
def volume_update_all_by_service(context):
    """Update all volumes associated with an old service."""
    return IMPL.volume_update_all_by_service(context)
def volume_get_all_by_group(context, group_id, filters=None):
    """Get all volumes belonging to a consistency group."""
    return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
def volume_get_all_by_generic_group(context, group_id, filters=None):
    """Get all volumes belonging to a generic volume group."""
    return IMPL.volume_get_all_by_generic_group(context, group_id,
                                                filters=filters)
def volume_get_all_by_project(context, project_id, marker, limit,
                              sort_keys=None, sort_dirs=None, filters=None,
                              offset=None):
    """Get all volumes belonging to a project."""
    return IMPL.volume_get_all_by_project(context, project_id, marker, limit,
                                          sort_keys=sort_keys,
                                          sort_dirs=sort_dirs,
                                          filters=filters,
                                          offset=offset)
def get_volume_summary(context, project_only, filters=None):
    """Get volume summary."""
    return IMPL.get_volume_summary(context, project_only, filters)
def volume_update(context, volume_id, values):
    """Set the given properties on a volume and update it.
    Raises NotFound if volume does not exist.
    """
    return IMPL.volume_update(context, volume_id, values)
def volumes_update(context, values_list):
    """Set the given properties on a list of volumes and update them.
    Raises NotFound if a volume does not exist.
    """
    return IMPL.volumes_update(context, values_list)
def volume_include_in_cluster(context, cluster, partial_rename=True,
                              **filters):
    """Include all volumes matching the filters into a cluster.
    When partial_rename is set we will not set the cluster_name with cluster
    parameter value directly, we'll replace provided cluster_name or host
    filter value with cluster instead.
    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is. If we are using cluster_name
    to filter, we'll use that same DB field to replace the cluster value and
    leave the rest as it is. Likewise if we use the host to filter.
    Returns the number of volumes that have been changed.
    """
    return IMPL.volume_include_in_cluster(context, cluster, partial_rename,
                                          **filters)
def volume_attachment_update(context, attachment_id, values):
    """Set the given properties on an attachment and update it."""
    return IMPL.volume_attachment_update(context, attachment_id, values)
def volume_attachment_get(context, attachment_id):
    """Get an attachment by its id."""
    return IMPL.volume_attachment_get(context, attachment_id)
def volume_attachment_get_all_by_volume_id(context, volume_id):
    """Get all attachments of a volume."""
    return IMPL.volume_attachment_get_all_by_volume_id(context,
                                                       volume_id)
def volume_attachment_get_all_by_host(context, host, filters=None):
    """Get all attachments for a host."""
    # FIXME(jdg): Not using filters
    return IMPL.volume_attachment_get_all_by_host(context, host)
def volume_attachment_get_all_by_instance_uuid(context,
                                               instance_uuid, filters=None):
    """Get all attachments for an instance."""
    # FIXME(jdg): Not using filters
    return IMPL.volume_attachment_get_all_by_instance_uuid(context,
                                                           instance_uuid)
def volume_attachment_get_all(context, filters=None, marker=None, limit=None,
                              offset=None, sort_keys=None, sort_dirs=None):
    """Get all attachments."""
    return IMPL.volume_attachment_get_all(context, filters, marker, limit,
                                          offset, sort_keys, sort_dirs)
def volume_attachment_get_all_by_project(context, project_id, filters=None,
                                         marker=None, limit=None, offset=None,
                                         sort_keys=None, sort_dirs=None):
    """Get all attachments belonging to a project."""
    return IMPL.volume_attachment_get_all_by_project(context, project_id,
                                                     filters, marker, limit,
                                                     offset, sort_keys,
                                                     sort_dirs)
def attachment_destroy(context, attachment_id):
    """Destroy the attachment or raise if it does not exist."""
    return IMPL.attachment_destroy(context, attachment_id)
def volume_update_status_based_on_attachment(context, volume_id):
    """Update volume status according to attached instance id"""
    return IMPL.volume_update_status_based_on_attachment(context, volume_id)
def volume_has_snapshots_filter():
    """Return an IMPL-provided filter for volumes that have snapshots."""
    return IMPL.volume_has_snapshots_filter()
def volume_has_undeletable_snapshots_filter():
    """Return an IMPL-provided filter for volumes with undeletable snapshots."""
    return IMPL.volume_has_undeletable_snapshots_filter()
def volume_has_snapshots_in_a_cgsnapshot_filter():
    """Return an IMPL-provided filter for volumes with cgsnapshot snapshots."""
    return IMPL.volume_has_snapshots_in_a_cgsnapshot_filter()
def volume_has_attachments_filter():
    """Return an IMPL-provided filter for volumes that have attachments."""
    return IMPL.volume_has_attachments_filter()
def volume_qos_allows_retype(new_vol_type):
    """Delegate the QoS retype-permission check for new_vol_type to IMPL."""
    return IMPL.volume_qos_allows_retype(new_vol_type)
def volume_has_other_project_snp_filter():
    """Return an IMPL-provided filter for volumes with snapshots owned by other projects."""
    return IMPL.volume_has_other_project_snp_filter()
####################
def snapshot_create(context, values):
    """Create a snapshot from the values dictionary."""
    return IMPL.snapshot_create(context, values)
def snapshot_destroy(context, snapshot_id):
    """Destroy the snapshot or raise if it does not exist."""
    return IMPL.snapshot_destroy(context, snapshot_id)
def snapshot_get(context, snapshot_id):
    """Get a snapshot or raise if it does not exist."""
    return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context, filters=None, marker=None, limit=None,
                     sort_keys=None, sort_dirs=None, offset=None):
    """Get all snapshots."""
    return IMPL.snapshot_get_all(context, filters, marker, limit, sort_keys,
                                 sort_dirs, offset)
def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
                                limit=None, sort_keys=None, sort_dirs=None,
                                offset=None):
    """Get all snapshots belonging to a project."""
    return IMPL.snapshot_get_all_by_project(context, project_id, filters,
                                            marker, limit, sort_keys,
                                            sort_dirs, offset)
def snapshot_get_all_by_host(context, host, filters=None):
    """Get all snapshots belonging to a host.
    :param host: Include snapshots only for the specified host.
    :param filters: Filters for the query in the form of key/value.
    """
    return IMPL.snapshot_get_all_by_host(context, host, filters)
def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
    """Get all snapshots belonging to a cgsnapshot."""
    return IMPL.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id)
def snapshot_get_all_for_group_snapshot(context, group_snapshot_id):
    """Get all snapshots belonging to a group snapshot."""
    return IMPL.snapshot_get_all_for_group_snapshot(context, group_snapshot_id)
def snapshot_get_all_for_volume(context, volume_id):
    """Get all snapshots for a volume."""
    return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_get_latest_for_volume(context, volume_id):
    """Get latest snapshot for a volume"""
    return IMPL.snapshot_get_latest_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
    """Set the given properties on a snapshot and update it.
    Raises NotFound if snapshot does not exist.
    """
    return IMPL.snapshot_update(context, snapshot_id, values)
def snapshot_data_get_for_project(context, project_id, volume_type_id=None,
                                  host=None):
    """Get count and gigabytes used for snapshots for specified project."""
    return IMPL.snapshot_data_get_for_project(context,
                                              project_id,
                                              volume_type_id,
                                              host=host)
def snapshot_get_all_active_by_window(context, begin, end=None,
                                      project_id=None):
    """Get all the snapshots inside the window.
    Specifying a project_id will filter for a certain project.
    """
    return IMPL.snapshot_get_all_active_by_window(context, begin, end,
                                                  project_id)
def get_snapshot_summary(context, project_only, filters=None):
    """Get snapshot summary."""
    return IMPL.get_snapshot_summary(context, project_only, filters)
####################
def snapshot_metadata_get(context, snapshot_id):
    """Get all metadata for a snapshot."""
    return IMPL.snapshot_metadata_get(context, snapshot_id)
def snapshot_metadata_delete(context, snapshot_id, key):
    """Delete the given metadata item."""
    return IMPL.snapshot_metadata_delete(context, snapshot_id, key)
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
    """Update metadata if it exists, otherwise create it.
    :param delete: presumably removes keys absent from ``metadata`` when
                   True — confirm against IMPL.
    """
    return IMPL.snapshot_metadata_update(context, snapshot_id,
                                         metadata, delete)
####################
def volume_metadata_get(context, volume_id):
    """Get all metadata for a volume."""
    return IMPL.volume_metadata_get(context, volume_id)
def volume_metadata_delete(context, volume_id, key,
                           meta_type=common.METADATA_TYPES.user):
    """Delete the given metadata item.
    :param meta_type: metadata category; defaults to user metadata.
    """
    return IMPL.volume_metadata_delete(context, volume_id,
                                       key, meta_type)
def volume_metadata_update(context, volume_id, metadata,
                           delete, meta_type=common.METADATA_TYPES.user):
    """Update metadata if it exists, otherwise create it.
    :param meta_type: metadata category; defaults to user metadata.
    """
    return IMPL.volume_metadata_update(context, volume_id, metadata,
                                       delete, meta_type)
##################
def volume_admin_metadata_get(context, volume_id):
    """Get all administration metadata for a volume."""
    return IMPL.volume_admin_metadata_get(context, volume_id)
def volume_admin_metadata_delete(context, volume_id, key):
    """Delete the given metadata item."""
    return IMPL.volume_admin_metadata_delete(context, volume_id, key)
def volume_admin_metadata_update(context, volume_id, metadata, delete,
                                 add=True, update=True):
    """Update metadata if it exists, otherwise create it.
    NOTE(review): ``add``/``update`` appear to gate whether new keys may be
    added and existing keys modified — confirm against IMPL.
    """
    return IMPL.volume_admin_metadata_update(context, volume_id, metadata,
                                             delete, add, update)
##################
def volume_type_create(context, values, projects=None):
    """Create a new volume type."""
    return IMPL.volume_type_create(context, values, projects)
def volume_type_update(context, volume_type_id, values):
    """Set the given properties on a volume type and update it."""
    return IMPL.volume_type_update(context, volume_type_id, values)
def volume_type_get_all(context, inactive=False, filters=None, marker=None,
                        limit=None, sort_keys=None, sort_dirs=None,
                        offset=None, list_result=False):
    """Get all volume types.
    :param context: context to query under
    :param inactive: Include inactive volume types to the result set
    :param filters: Filters for the query in the form of key/value.
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param list_result: For compatibility, if list_result = True, return a list
                        instead of dict.
    The 'is_public' filter key controls visibility:
        * **True**: List public volume types only
        * **False**: List private volume types only
        * **None**: List both public and private volume types
    :returns: list/dict of matching volume types
    """
    return IMPL.volume_type_get_all(context, inactive, filters, marker=marker,
                                    limit=limit, sort_keys=sort_keys,
                                    sort_dirs=sort_dirs, offset=offset,
                                    list_result=list_result)
def volume_type_get(context, id, inactive=False, expected_fields=None):
    """Get volume type by id.
    :param context: context to query under
    :param id: Volume type id to get.
    :param inactive: Consider inactive volume types when searching
    :param expected_fields: Return those additional fields.
                            Supported fields are: projects.
    :returns: volume type
    """
    return IMPL.volume_type_get(context, id, inactive, expected_fields)
def volume_type_get_by_name(context, name):
    """Get volume type by name."""
    return IMPL.volume_type_get_by_name(context, name)
def volume_types_get_by_name_or_id(context, volume_type_list):
    """Get volume types by name or id."""
    return IMPL.volume_types_get_by_name_or_id(context, volume_type_list)
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
    """Get volume types that are associated with specific qos specs."""
    return IMPL.volume_type_qos_associations_get(context,
                                                 qos_specs_id,
                                                 inactive)
def volume_type_qos_associate(context, type_id, qos_specs_id):
    """Associate a volume type with specific qos specs."""
    return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id)
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
    """Disassociate a volume type from specific qos specs."""
    return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id)
def volume_type_qos_disassociate_all(context, qos_specs_id):
    """Disassociate all volume types from specific qos specs."""
    return IMPL.volume_type_qos_disassociate_all(context,
                                                 qos_specs_id)
def volume_type_qos_specs_get(context, type_id):
    """Get all qos specs for given volume type."""
    return IMPL.volume_type_qos_specs_get(context, type_id)
def volume_type_destroy(context, type_id):
    """Delete a volume type."""
    return IMPL.volume_type_destroy(context, type_id)
def volume_get_all_active_by_window(context, begin, end=None, project_id=None):
    """Get all the volumes inside the window.
    Specifying a project_id will filter for a certain project.
    """
    return IMPL.volume_get_all_active_by_window(context, begin, end,
                                                project_id)
def volume_type_access_get_all(context, type_id):
    """Get all volume type access of a volume type."""
    return IMPL.volume_type_access_get_all(context, type_id)
def volume_type_access_add(context, type_id, project_id):
    """Add volume type access for project."""
    return IMPL.volume_type_access_add(context, type_id, project_id)
def volume_type_access_remove(context, type_id, project_id):
    """Remove volume type access for project."""
    return IMPL.volume_type_access_remove(context, type_id, project_id)
def project_default_volume_type_set(context, volume_type_id, project_id):
    """Set default volume type for a project"""
    return IMPL.project_default_volume_type_set(context, volume_type_id,
                                                project_id)
def project_default_volume_type_get(context, project_id=None):
    """Get default volume type for a project"""
    return IMPL.project_default_volume_type_get(context, project_id)
def project_default_volume_type_unset(context, project_id):
    """Unset default volume type for a project (hard delete)"""
    return IMPL.project_default_volume_type_unset(context, project_id)
def get_all_projects_with_default_type(context, volume_type_id):
    """Get all the projects associated with a default type"""
    return IMPL.get_all_projects_with_default_type(context, volume_type_id)
####################
def group_type_create(context, values, projects=None):
    """Create a new group type."""
    return IMPL.group_type_create(context, values, projects)
def group_type_update(context, group_type_id, values):
    """Set the given properties on a group type and update it."""
    return IMPL.group_type_update(context, group_type_id, values)
def group_type_get_all(context, inactive=False, filters=None, marker=None,
                       limit=None, sort_keys=None, sort_dirs=None,
                       offset=None, list_result=False):
    """Get all group types.
    :param context: context to query under
    :param inactive: Include inactive group types to the result set
    :param filters: Filters for the query in the form of key/value.
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param list_result: For compatibility, if list_result = True, return a list
                        instead of dict.
    The 'is_public' filter key controls visibility:
        * **True**: List public group types only
        * **False**: List private group types only
        * **None**: List both public and private group types
    :returns: list/dict of matching group types
    """
    return IMPL.group_type_get_all(context, inactive, filters, marker=marker,
                                   limit=limit, sort_keys=sort_keys,
                                   sort_dirs=sort_dirs, offset=offset,
                                   list_result=list_result)
def group_type_get(context, id, inactive=False, expected_fields=None):
    """Get group type by id.
    :param context: context to query under
    :param id: Group type id to get.
    :param inactive: Consider inactive group types when searching
    :param expected_fields: Return those additional fields.
                            Supported fields are: projects.
    :returns: group type
    """
    return IMPL.group_type_get(context, id, inactive, expected_fields)
def group_type_get_by_name(context, name):
    """Get group type by name."""
    return IMPL.group_type_get_by_name(context, name)
def group_types_get_by_name_or_id(context, group_type_list):
    """Get group types by name or id."""
    return IMPL.group_types_get_by_name_or_id(context, group_type_list)
def group_type_destroy(context, type_id):
    """Delete a group type."""
    return IMPL.group_type_destroy(context, type_id)
def group_type_access_get_all(context, type_id):
    """Get all group type access of a group type."""
    return IMPL.group_type_access_get_all(context, type_id)
def group_type_access_add(context, type_id, project_id):
    """Add group type access for project."""
    return IMPL.group_type_access_add(context, type_id, project_id)
def group_type_access_remove(context, type_id, project_id):
    """Remove group type access for project."""
    return IMPL.group_type_access_remove(context, type_id, project_id)
def volume_type_get_all_by_group(context, group_id):
    """Get all volume types associated with a group."""
    return IMPL.volume_type_get_all_by_group(context, group_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
    """Get all extra specs for a volume type."""
    return IMPL.volume_type_extra_specs_get(context, volume_type_id)
def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Delete the given extra specs item."""
    return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(
    context, volume_type_id, extra_specs,
):
    """Create or update volume type extra specs.
    This adds or modifies the key/value pairs specified in the extra specs dict
    argument.
    :param extra_specs: dict of key/value pairs to set on the volume type.
    """
    return IMPL.volume_type_extra_specs_update_or_create(
        context, volume_type_id, extra_specs,
    )
###################
def group_type_specs_get(context, group_type_id):
    """Get all group specs for a group type."""
    return IMPL.group_type_specs_get(context, group_type_id)
def group_type_specs_delete(context, group_type_id, key):
    """Delete the given group specs item."""
    return IMPL.group_type_specs_delete(context, group_type_id, key)
def group_type_specs_update_or_create(context,
                                      group_type_id,
                                      group_specs):
    """Create or update group type specs.
    This adds or modifies the key/value pairs specified in the group specs dict
    argument.
    :param group_specs: dict of key/value pairs to set on the group type.
    """
    return IMPL.group_type_specs_update_or_create(context,
                                                  group_type_id,
                                                  group_specs)
###################
def volume_type_encryption_get(context, volume_type_id):
    """Get the encryption spec of a volume type."""
    return IMPL.volume_type_encryption_get(context, volume_type_id)
def volume_type_encryption_delete(context, volume_type_id):
    """Delete the encryption spec of a volume type."""
    return IMPL.volume_type_encryption_delete(context, volume_type_id)
def volume_type_encryption_create(context, volume_type_id, values):
    """Create an encryption spec for a volume type from the values dict."""
    return IMPL.volume_type_encryption_create(context, volume_type_id,
                                              values)
def volume_type_encryption_update(context, volume_type_id, values):
    """Update the encryption spec of a volume type."""
    return IMPL.volume_type_encryption_update(context, volume_type_id, values)
def volume_type_encryption_volume_get(context, volume_type_id):
    # NOTE(review): delegates to IMPL; appears to return the volumes of the
    # given (encryption-enabled) volume type — confirm against IMPL.
    return IMPL.volume_type_encryption_volume_get(context, volume_type_id)
def volume_encryption_metadata_get(context, volume_id):
    """Get the encryption metadata for a volume."""
    return IMPL.volume_encryption_metadata_get(context, volume_id)
###################
def qos_specs_create(context, values):
    """Create a qos_specs."""
    return IMPL.qos_specs_create(context, values)
def qos_specs_get(context, qos_specs_id, inactive=False):
    """Get all specifications for a given qos_specs."""
    return IMPL.qos_specs_get(context, qos_specs_id, inactive)
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
                      offset=None, sort_keys=None, sort_dirs=None):
    """Get all qos_specs."""
    return IMPL.qos_specs_get_all(context, filters=filters, marker=marker,
                                  limit=limit, offset=offset,
                                  sort_keys=sort_keys, sort_dirs=sort_dirs)
def qos_specs_get_by_name(context, name, inactive=False):
    """Get all specifications for the qos_specs matching the name."""
    return IMPL.qos_specs_get_by_name(context, name, inactive)
def qos_specs_associations_get(context, qos_specs_id):
    """Get all associated volume types for a given qos_specs."""
    return IMPL.qos_specs_associations_get(context, qos_specs_id)
def qos_specs_associate(context, qos_specs_id, type_id):
    """Associate qos_specs with a volume type."""
    return IMPL.qos_specs_associate(context, qos_specs_id, type_id)
def qos_specs_disassociate(context, qos_specs_id, type_id):
    """Disassociate qos_specs from volume type."""
    return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id)
def qos_specs_disassociate_all(context, qos_specs_id):
    """Disassociate qos_specs from all entities."""
    return IMPL.qos_specs_disassociate_all(context, qos_specs_id)
def qos_specs_delete(context, qos_specs_id):
    """Delete the qos_specs."""
    return IMPL.qos_specs_delete(context, qos_specs_id)
def qos_specs_item_delete(context, qos_specs_id, key):
    """Delete specified key in the qos_specs."""
    return IMPL.qos_specs_item_delete(context, qos_specs_id, key)
def qos_specs_update(context, qos_specs_id, values):
    """Update qos specs.
    This adds or modifies the key/value pairs specified in the
    specs dict argument for a given qos_specs.
    """
    return IMPL.qos_specs_update(context, qos_specs_id, values)
###################
def volume_glance_metadata_create(context, volume_id, key, value):
    """Add Glance metadata for specified volume (single key/value pair)."""
    return IMPL.volume_glance_metadata_create(context,
                                              volume_id,
                                              key,
                                              value)
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
    """Add Glance metadata for specified volume (multiple pairs)."""
    return IMPL.volume_glance_metadata_bulk_create(context, volume_id,
                                                   metadata)
def volume_glance_metadata_get_all(context):
    """Return the glance metadata for all volumes."""
    return IMPL.volume_glance_metadata_get_all(context)
def volume_glance_metadata_get(context, volume_id):
    """Return the glance metadata for a volume."""
    return IMPL.volume_glance_metadata_get(context, volume_id)
def volume_glance_metadata_list_get(context, volume_id_list):
    """Return the glance metadata for a volume list."""
    return IMPL.volume_glance_metadata_list_get(context, volume_id_list)
def volume_snapshot_glance_metadata_get(context, snapshot_id):
    """Return the Glance metadata for the specified snapshot."""
    return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id)
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
    """Update the Glance metadata for a snapshot.
    This will copy all of the key:value pairs from the originating volume,
    to ensure that a volume created from the snapshot will retain the
    original metadata.
    """
    return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id,
                                                        volume_id)
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
    """Update the Glance metadata from a volume (created from a snapshot).
    This will copy all of the key:value pairs from the originating snapshot,
    to ensure that the Glance metadata from the original volume is retained.
    """
    return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id,
                                                      snapshot_id)
def volume_glance_metadata_delete_by_volume(context, volume_id):
    """Delete the glance metadata for a volume."""
    return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id)
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
    """Delete the glance metadata for a snapshot."""
    return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
def volume_glance_metadata_copy_from_volume_to_volume(context,
                                                      src_volume_id,
                                                      volume_id):
    """Update the Glance metadata for a volume.
    Update the Glance metadata for a volume by copying all of the key:value
    pairs from the originating volume.
    This is so that a volume created from the volume (clone) will retain the
    original metadata.
    """
    return IMPL.volume_glance_metadata_copy_from_volume_to_volume(
        context,
        src_volume_id,
        volume_id)
###################
def quota_create(context, project_id, resource, limit):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id, resource):
    """Retrieve a quota or raise if it does not exist."""
    return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit)
def quota_update_resource(context, old_res, new_res):
    """Rename resource old_res to new_res in quotas."""
    return IMPL.quota_update_resource(context, old_res, new_res)
def quota_destroy(context, project_id, resource):
    """Destroy the quota or raise if it does not exist."""
    return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
    """Create a quota class for the given name and resource."""
    return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
    """Retrieve a quota class or raise if it does not exist."""
    return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_defaults(context):
    """Retrieve all default quotas."""
    return IMPL.quota_class_get_defaults(context)
def quota_class_get_all_by_name(context, class_name):
    """Retrieve all quotas associated with a given quota class."""
    return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
    """Update a quota class or raise if it does not exist."""
    return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_update_resource(context, old_res, new_res):
    """Rename resource old_res to new_res in quota_class."""
    return IMPL.quota_class_update_resource(context, old_res, new_res)
def quota_class_destroy(context, class_name, resource):
    """Destroy the quota class or raise if it does not exist."""
    return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
    """Destroy all quotas associated with a given quota class."""
    return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_get(context, project_id, resource):
    """Retrieve a quota usage or raise if it does not exist."""
    return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
    """Retrieve all usage associated with a given project."""
    return IMPL.quota_usage_get_all_by_project(context, project_id)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None):
    """Check quotas and create appropriate reservations.
    :returns: presumably the created reservations — confirm against IMPL.
    """
    return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
                              until_refresh, max_age, project_id=project_id)
def reservation_commit(context, reservations, project_id=None):
    """Commit quota reservations."""
    return IMPL.reservation_commit(context, reservations,
                                   project_id=project_id)
def reservation_rollback(context, reservations, project_id=None):
    """Roll back quota reservations."""
    return IMPL.reservation_rollback(context, reservations,
                                     project_id=project_id)
def quota_destroy_by_project(context, project_id):
    """Destroy all quotas associated with a given project."""
    return IMPL.quota_destroy_by_project(context, project_id)
def reservation_expire(context):
    """Roll back any expired reservations."""
    return IMPL.reservation_expire(context)
def quota_usage_update_resource(context, old_res, new_res):
    """Update resource field in quota_usages."""
    return IMPL.quota_usage_update_resource(context, old_res, new_res)
###################
def backup_get(context, backup_id, read_deleted=None, project_only=True):
    """Get a backup or raise if it does not exist."""
    return IMPL.backup_get(context, backup_id, read_deleted, project_only)
def backup_get_all(context, filters=None, marker=None, limit=None,
                   offset=None, sort_keys=None, sort_dirs=None):
    """Get all backups."""
    return IMPL.backup_get_all(context, filters=filters, marker=marker,
                               limit=limit, offset=offset, sort_keys=sort_keys,
                               sort_dirs=sort_dirs)
def backup_get_all_by_host(context, host):
    """Get all backups belonging to a host."""
    return IMPL.backup_get_all_by_host(context, host)
def backup_create(context, values):
    """Create a backup from the values dictionary."""
    return IMPL.backup_create(context, values)
def backup_metadata_get(context, backup_id):
    """Get all metadata for a backup."""
    return IMPL.backup_metadata_get(context, backup_id)
def backup_metadata_update(context, backup_id, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    return IMPL.backup_metadata_update(context, backup_id, metadata, delete)
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
                              limit=None, offset=None, sort_keys=None,
                              sort_dirs=None):
    """Get all backups belonging to a project."""
    return IMPL.backup_get_all_by_project(context, project_id,
                                          filters=filters, marker=marker,
                                          limit=limit, offset=offset,
                                          sort_keys=sort_keys,
                                          sort_dirs=sort_dirs)
def backup_get_all_by_volume(context, volume_id, vol_project_id, filters=None):
    """Get all backups belonging to a volume."""
    return IMPL.backup_get_all_by_volume(context, volume_id, vol_project_id,
                                         filters=filters)
def backup_get_all_active_by_window(context, begin, end=None, project_id=None):
    """Get all the backups inside the window.
    Specifying a project_id will filter for a certain project.
    """
    return IMPL.backup_get_all_active_by_window(context, begin, end,
                                                project_id)
def backup_update(context, backup_id, values):
    """Set the given properties on a backup and update it.
    Raises NotFound if backup does not exist.
    """
    return IMPL.backup_update(context, backup_id, values)
def backup_destroy(context, backup_id):
    """Destroy the backup or raise if it does not exist."""
    return IMPL.backup_destroy(context, backup_id)
###################
def transfer_get(context, transfer_id):
    """Get a volume transfer record or raise if it does not exist."""
    return IMPL.transfer_get(context, transfer_id)


def transfer_get_all(context, marker=None, limit=None, sort_keys=None,
                     sort_dirs=None, filters=None, offset=None):
    """Get all volume transfer records."""
    return IMPL.transfer_get_all(context, marker=marker, limit=limit,
                                 sort_keys=sort_keys, sort_dirs=sort_dirs,
                                 filters=filters, offset=offset)


def transfer_get_all_by_project(context, project_id, marker=None,
                                limit=None, sort_keys=None,
                                sort_dirs=None, filters=None, offset=None):
    """Get all volume transfer records for specified project."""
    return IMPL.transfer_get_all_by_project(context, project_id, marker=marker,
                                            limit=limit, sort_keys=sort_keys,
                                            sort_dirs=sort_dirs,
                                            filters=filters, offset=offset)


def transfer_create(context, values):
    """Create an entry in the transfers table."""
    return IMPL.transfer_create(context, values)


def transfer_destroy(context, transfer_id):
    """Destroy a record in the volume transfer table."""
    return IMPL.transfer_destroy(context, transfer_id)


def transfer_accept(context, transfer_id, user_id, project_id,
                    no_snapshots=False):
    """Accept a volume transfer.

    NOTE(review): ``no_snapshots`` presumably excludes the volume's snapshots
    from the transfer when true -- confirm against the backend implementation.
    """
    return IMPL.transfer_accept(context, transfer_id, user_id, project_id,
                                no_snapshots=no_snapshots)
###################
def consistencygroup_get(context, consistencygroup_id):
    """Get a consistencygroup or raise if it does not exist."""
    return IMPL.consistencygroup_get(context, consistencygroup_id)


def consistencygroup_get_all(context, filters=None, marker=None, limit=None,
                             offset=None, sort_keys=None, sort_dirs=None):
    """Get all consistencygroups."""
    return IMPL.consistencygroup_get_all(context, filters=filters,
                                         marker=marker, limit=limit,
                                         offset=offset, sort_keys=sort_keys,
                                         sort_dirs=sort_dirs)


def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None):
    """Create a consistencygroup from the values dictionary."""
    return IMPL.consistencygroup_create(context, values, cg_snap_id, cg_id)


def consistencygroup_get_all_by_project(context, project_id, filters=None,
                                        marker=None, limit=None, offset=None,
                                        sort_keys=None, sort_dirs=None):
    """Get all consistencygroups belonging to a project."""
    return IMPL.consistencygroup_get_all_by_project(context, project_id,
                                                    filters=filters,
                                                    marker=marker, limit=limit,
                                                    offset=offset,
                                                    sort_keys=sort_keys,
                                                    sort_dirs=sort_dirs)


def consistencygroup_update(context, consistencygroup_id, values):
    """Set the given properties on a consistencygroup and update it.

    Raises NotFound if consistencygroup does not exist.
    """
    return IMPL.consistencygroup_update(context, consistencygroup_id, values)


def consistencygroup_destroy(context, consistencygroup_id):
    """Destroy the consistencygroup or raise if it does not exist."""
    return IMPL.consistencygroup_destroy(context, consistencygroup_id)


def cg_has_cgsnapshot_filter():
    """Return a filter that checks if a CG has CG Snapshots."""
    return IMPL.cg_has_cgsnapshot_filter()


def cg_has_volumes_filter(attached_or_with_snapshots=False):
    """Return a filter to check if a CG has volumes.

    When attached_or_with_snapshots parameter is given a True value only
    attached volumes or those with snapshots will be considered.
    """
    return IMPL.cg_has_volumes_filter(attached_or_with_snapshots)


def cg_creating_from_src(cg_id=None, cgsnapshot_id=None):
    """Return a filter to check if a CG is being used as creation source.

    Returned filter is meant to be used in the Conditional Update mechanism
    and checks if provided CG ID or CG Snapshot ID is currently being used to
    create another CG.

    This filter will not include CGs that have used the ID but have already
    finished their creation (status is no longer creating).

    Filter uses a subquery that allows it to be used on updates to the
    consistencygroups table.
    """
    return IMPL.cg_creating_from_src(cg_id, cgsnapshot_id)


def consistencygroup_include_in_cluster(context, cluster, partial_rename=True,
                                        **filters):
    """Include all consistency groups matching the filters into a cluster.

    When partial_rename is set we will not set the cluster_name with cluster
    parameter value directly, we'll replace provided cluster_name or host
    filter value with cluster instead.

    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is. If we are using cluster_name
    to filter, we'll use that same DB field to replace the cluster value and
    leave the rest as it is. Likewise if we use the host to filter.

    Returns the number of consistency groups that have been changed.
    """
    return IMPL.consistencygroup_include_in_cluster(context, cluster,
                                                    partial_rename,
                                                    **filters)


def group_include_in_cluster(context, cluster, partial_rename=True, **filters):
    """Include all generic groups matching the filters into a cluster.

    When partial_rename is set we will not set the cluster_name with cluster
    parameter value directly, we'll replace provided cluster_name or host
    filter value with cluster instead.

    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is. If we are using cluster_name
    to filter, we'll use that same DB field to replace the cluster value and
    leave the rest as it is. Likewise if we use the host to filter.

    Returns the number of generic groups that have been changed.
    """
    return IMPL.group_include_in_cluster(context, cluster, partial_rename,
                                         **filters)
###################
def group_get(context, group_id):
    """Get a group or raise if it does not exist."""
    return IMPL.group_get(context, group_id)


def group_get_all(context, filters=None, marker=None, limit=None,
                  offset=None, sort_keys=None, sort_dirs=None):
    """Get all groups."""
    return IMPL.group_get_all(context, filters=filters,
                              marker=marker, limit=limit,
                              offset=offset, sort_keys=sort_keys,
                              sort_dirs=sort_dirs)


def group_create(
    context,
    values,
    group_snapshot_id=None,
    source_group_id=None,
):
    """Create a group from the values dictionary."""
    return IMPL.group_create(
        context,
        values,
        group_snapshot_id,
        source_group_id,
    )


def group_get_all_by_project(context, project_id, filters=None,
                             marker=None, limit=None, offset=None,
                             sort_keys=None, sort_dirs=None):
    """Get all groups belonging to a project."""
    return IMPL.group_get_all_by_project(context, project_id,
                                         filters=filters,
                                         marker=marker, limit=limit,
                                         offset=offset,
                                         sort_keys=sort_keys,
                                         sort_dirs=sort_dirs)


def group_update(context, group_id, values):
    """Set the given properties on a group and update it.

    Raises NotFound if group does not exist.
    """
    return IMPL.group_update(context, group_id, values)


def group_destroy(context, group_id):
    """Destroy the group or raise if it does not exist."""
    return IMPL.group_destroy(context, group_id)


def group_has_group_snapshot_filter():
    """Return a filter that checks if a Group has Group Snapshots."""
    return IMPL.group_has_group_snapshot_filter()


def group_has_volumes_filter(attached_or_with_snapshots=False):
    """Return a filter to check if a Group has volumes.

    When attached_or_with_snapshots parameter is given a True value only
    attached volumes or those with snapshots will be considered.
    """
    return IMPL.group_has_volumes_filter(attached_or_with_snapshots)


def group_creating_from_src(group_id=None, group_snapshot_id=None):
    """Return a filter to check if a Group is being used as creation source.

    Returned filter is meant to be used in the Conditional Update mechanism
    and checks if provided Group ID or Group Snapshot ID is currently being
    used to create another Group.

    This filter will not include Groups that have used the ID but have
    already finished their creation (status is no longer creating).

    Filter uses a subquery that allows it to be used on updates to the
    groups table.
    """
    return IMPL.group_creating_from_src(group_id, group_snapshot_id)


def group_volume_type_mapping_create(context, group_id, volume_type_id):
    """Create a group volume_type mapping entry."""
    return IMPL.group_volume_type_mapping_create(context, group_id,
                                                 volume_type_id)
###################
def cgsnapshot_get(context, cgsnapshot_id):
    """Get a cgsnapshot or raise if it does not exist."""
    return IMPL.cgsnapshot_get(context, cgsnapshot_id)


def cgsnapshot_get_all(context, filters=None):
    """Get all cgsnapshots."""
    return IMPL.cgsnapshot_get_all(context, filters)


def cgsnapshot_create(context, values):
    """Create a cgsnapshot from the values dictionary."""
    return IMPL.cgsnapshot_create(context, values)


def cgsnapshot_get_all_by_group(context, group_id, filters=None):
    """Get all cgsnapshots belonging to a consistency group."""
    return IMPL.cgsnapshot_get_all_by_group(context, group_id, filters)


def cgsnapshot_get_all_by_project(context, project_id, filters=None):
    """Get all cgsnapshots belonging to a project."""
    return IMPL.cgsnapshot_get_all_by_project(context, project_id, filters)


def cgsnapshot_update(context, cgsnapshot_id, values):
    """Set the given properties on a cgsnapshot and update it.

    Raises NotFound if cgsnapshot does not exist.
    """
    return IMPL.cgsnapshot_update(context, cgsnapshot_id, values)


def cgsnapshot_destroy(context, cgsnapshot_id):
    """Destroy the cgsnapshot or raise if it does not exist."""
    return IMPL.cgsnapshot_destroy(context, cgsnapshot_id)


def cgsnapshot_creating_from_src():
    """Get a filter that checks if a CGSnapshot is being created from a CG."""
    return IMPL.cgsnapshot_creating_from_src()


###################


def group_snapshot_get(context, group_snapshot_id):
    """Get a group snapshot or raise if it does not exist."""
    return IMPL.group_snapshot_get(context, group_snapshot_id)


def group_snapshot_get_all(context, filters=None, marker=None, limit=None,
                           offset=None, sort_keys=None, sort_dirs=None):
    """Get all group snapshots."""
    return IMPL.group_snapshot_get_all(context, filters, marker, limit,
                                       offset, sort_keys, sort_dirs)


def group_snapshot_create(context, values):
    """Create a group snapshot from the values dictionary."""
    return IMPL.group_snapshot_create(context, values)


def group_snapshot_get_all_by_group(context, group_id, filters=None,
                                    marker=None, limit=None,
                                    offset=None, sort_keys=None,
                                    sort_dirs=None):
    """Get all group snapshots belonging to a group."""
    return IMPL.group_snapshot_get_all_by_group(context, group_id,
                                                filters, marker, limit,
                                                offset, sort_keys, sort_dirs)


def group_snapshot_get_all_by_project(context, project_id, filters=None,
                                      marker=None, limit=None,
                                      offset=None, sort_keys=None,
                                      sort_dirs=None):
    """Get all group snapshots belonging to a project."""
    return IMPL.group_snapshot_get_all_by_project(context, project_id,
                                                  filters, marker, limit,
                                                  offset, sort_keys,
                                                  sort_dirs)


def group_snapshot_update(context, group_snapshot_id, values):
    """Set the given properties on a group snapshot and update it.

    Raises NotFound if group snapshot does not exist.
    """
    return IMPL.group_snapshot_update(context, group_snapshot_id, values)


def group_snapshot_destroy(context, group_snapshot_id):
    """Destroy the group snapshot or raise if it does not exist."""
    return IMPL.group_snapshot_destroy(context, group_snapshot_id)


def group_snapshot_creating_from_src():
    """Get a filter to check if a grp snapshot is being created from a grp."""
    return IMPL.group_snapshot_creating_from_src()
###################
def purge_deleted_rows(context, age_in_days):
    """Purge deleted rows older than given age from cinder tables

    Raises InvalidParameterValue if age_in_days is incorrect.

    :returns: number of deleted rows
    """
    return IMPL.purge_deleted_rows(context, age_in_days=age_in_days)


def get_booleans_for_table(table_name):
    """Get the boolean columns for the named table (delegates to IMPL)."""
    return IMPL.get_booleans_for_table(table_name)


###################


def reset_active_backend(context, enable_replication, active_backend_id,
                         backend_host):
    """Reset the active backend for a host."""
    return IMPL.reset_active_backend(context, enable_replication,
                                     active_backend_id, backend_host)


###################


def driver_initiator_data_insert_by_key(context, initiator,
                                        namespace, key, value):
    """Updates DriverInitiatorData entry.

    Sets the value for the specified key within the namespace.
    """
    return IMPL.driver_initiator_data_insert_by_key(context,
                                                    initiator,
                                                    namespace,
                                                    key,
                                                    value)


def driver_initiator_data_get(context, initiator, namespace):
    """Query for an DriverInitiatorData that has the specified key"""
    return IMPL.driver_initiator_data_get(context, initiator, namespace)
###################
def image_volume_cache_create(context, host, cluster_name, image_id,
                              image_updated_at, volume_id, size):
    """Create a new image volume cache entry."""
    return IMPL.image_volume_cache_create(context,
                                          host,
                                          cluster_name,
                                          image_id,
                                          image_updated_at,
                                          volume_id,
                                          size)


def image_volume_cache_delete(context, volume_id):
    """Delete an image volume cache entry specified by volume id."""
    return IMPL.image_volume_cache_delete(context, volume_id)


def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
    """Query for an image volume cache entry."""
    return IMPL.image_volume_cache_get_and_update_last_used(context,
                                                            image_id,
                                                            **filters)


def image_volume_cache_get_by_volume_id(context, volume_id):
    """Query to see if a volume id is an image-volume contained in the cache"""
    return IMPL.image_volume_cache_get_by_volume_id(context, volume_id)


def image_volume_cache_get_all(context, **filters):
    """Query for all image volume cache entry for a host."""
    return IMPL.image_volume_cache_get_all(context, **filters)


def image_volume_cache_include_in_cluster(context, cluster,
                                          partial_rename=True, **filters):
    """Include in cluster image volume cache entries matching the filters.

    When partial_rename is set we will not set the cluster_name with cluster
    parameter value directly, we'll replace provided cluster_name or host
    filter value with cluster instead.

    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is. If we are using cluster_name
    to filter, we'll use that same DB field to replace the cluster value and
    leave the rest as it is. Likewise if we use the host to filter.

    Returns the number of volumes that have been changed.
    """
    return IMPL.image_volume_cache_include_in_cluster(
        context, cluster, partial_rename, **filters)
###################
def message_get(context, message_id):
    """Return a message with the specified ID."""
    return IMPL.message_get(context, message_id)


def message_get_all(context, filters=None, marker=None, limit=None,
                    offset=None, sort_keys=None, sort_dirs=None):
    """Return all messages matching the filter, sort and pagination args."""
    return IMPL.message_get_all(context, filters=filters, marker=marker,
                                limit=limit, offset=offset,
                                sort_keys=sort_keys, sort_dirs=sort_dirs)


def message_create(context, values):
    """Creates a new message with the specified values."""
    return IMPL.message_create(context, values)


def message_destroy(context, message_id):
    """Deletes message with the specified ID."""
    return IMPL.message_destroy(context, message_id)


def cleanup_expired_messages(context):
    """Soft delete expired messages"""
    return IMPL.cleanup_expired_messages(context)


###################


def worker_create(context, **values):
    """Create a worker entry from optional arguments."""
    return IMPL.worker_create(context, **values)


def worker_get(context, **filters):
    """Get a worker or raise exception if it does not exist."""
    return IMPL.worker_get(context, **filters)


def worker_get_all(context, until=None, db_filters=None, **filters):
    """Get all workers that match given criteria."""
    return IMPL.worker_get_all(context, until=until, db_filters=db_filters,
                               **filters)


def worker_update(context, id, filters=None, orm_worker=None, **values):
    """Update a worker with given values."""
    return IMPL.worker_update(context, id, filters=filters,
                              orm_worker=orm_worker, **values)


def worker_claim_for_cleanup(context, claimer_id, orm_worker):
    """Soft delete a worker, change the service_id and update the worker."""
    return IMPL.worker_claim_for_cleanup(context, claimer_id, orm_worker)


def worker_destroy(context, **filters):
    """Delete a worker (no soft delete)."""
    return IMPL.worker_destroy(context, **filters)
###################
def attachment_specs_exist(context):
    """Check if there are attachment specs left."""
    return IMPL.attachment_specs_exist(context)


def attachment_specs_get(context, attachment_id):
    """DEPRECATED: Get all specs for an attachment."""
    return IMPL.attachment_specs_get(context, attachment_id)


def attachment_specs_delete(context, attachment_id, key):
    """DEPRECATED: Delete the given attachment specs item."""
    return IMPL.attachment_specs_delete(context, attachment_id, key)


def attachment_specs_update_or_create(context,
                                      attachment_id,
                                      specs):
    """DEPRECATED: Create or update attachment specs.

    This adds or modifies the key/value pairs specified in the attachment
    specs dict argument.
    """
    return IMPL.attachment_specs_update_or_create(context,
                                                  attachment_id,
                                                  specs)


###################


# TODO: (D Release) remove method and this comment
def remove_temporary_admin_metadata_data_migration(context, max_count):
    """Run the online data migration that removes temporary admin metadata.

    NOTE(review): ``max_count`` presumably bounds the number of rows
    processed per call -- confirm against the backend implementation.
    """
    return IMPL.remove_temporary_admin_metadata_data_migration(
        context, max_count)


def get_projects(context, model, read_deleted="no"):
    """Get the projects for the given model (delegates to IMPL)."""
    return IMPL.get_projects(context, model, read_deleted=read_deleted)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/base.py 0000664 0000000 0000000 00000001752 15131732575 0021512 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for classes that need modular database access."""
import cinder.db
class Base:
    """Base for classes that need modular database access.

    The DB API module is attached as ``self.db`` so subclasses reach the
    database layer through the instance instead of importing it directly.

    Note: the explicit ``object`` base was dropped; it is redundant in
    Python 3 (the file already relies on Python-3-only ``super()``).
    """

    def __init__(self):
        super().__init__()
        # self.db: handle to the cinder DB API module.
        self.db = cinder.db
        # NOTE(review): presumably drops any engine state carried over from
        # a parent process so each instance starts clean -- confirm against
        # cinder.db.dispose_engine().
        self.db.dispose_engine()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migration.py 0000664 0000000 0000000 00000007270 15131732575 0022572 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database setup and migration commands."""
import os
from alembic import command as alembic_api
from alembic import config as alembic_config
from alembic import migration as alembic_migration
from oslo_config import cfg
from oslo_db import options
from oslo_log import log as logging
from cinder.db.sqlalchemy import api as db_api
options.set_defaults(cfg.CONF)
LOG = logging.getLogger(__name__)
def _find_alembic_conf():
    """Get the project's alembic configuration

    :returns: An instance of ``alembic.config.Config``
    """
    here = os.path.abspath(os.path.dirname(__file__))
    ini_file = os.path.abspath(os.path.join(here, 'alembic.ini'))
    config = alembic_config.Config(ini_file)
    # The logger configuration in the ini file is intended for the alembic
    # CLI only; skip it so cinder's own logging setup is not clobbered.
    # https://stackoverflow.com/a/42691781/613428
    config.attributes['configure_logger'] = False
    return config
def _upgrade_alembic(engine, config, version):
    """Run an alembic upgrade to ``version`` (or 'head' when None).

    :param engine: sqlalchemy engine whose connection is shared with alembic
    :param config: an ``alembic.config.Config`` instance
    :param version: target revision string, or None for the latest
    """
    # re-use the connection rather than creating a new one
    with engine.begin() as connection:
        config.attributes['connection'] = connection
        alembic_api.upgrade(config, version or 'head')
def db_version():
    """Get database version."""
    engine = db_api.get_engine()
    with engine.connect() as connection:
        # Ask alembic which revision the connected database is stamped with.
        migration_ctxt = alembic_migration.MigrationContext.configure(
            connection)
        return migration_ctxt.get_current_revision()
def db_sync(version=None, engine=None):
    """Migrate the database to `version` or the most recent version.

    :param version: target alembic revision string, or None for the latest
        ('head'). Integer strings are rejected because they denote legacy
        sqlalchemy-migrate versions, which are no longer supported.
    :param engine: engine to migrate; defaults to the cinder DB API's engine.
    :raises ValueError: if a sqlalchemy-migrate (integer) version is requested
    """
    # if the user requested a specific version, check if it's an integer: if
    # so, we're almost certainly in sqlalchemy-migrate land and won't support
    # that
    if version is not None and version.isdigit():
        raise ValueError(
            'You requested an sqlalchemy-migrate database version; this is '
            'no longer supported'
        )
    if engine is None:
        engine = db_api.get_engine()
    config = _find_alembic_conf()
    # discard the URL encoded in alembic.ini in favour of the URL configured
    # for the engine by the database fixtures, casting from
    # 'sqlalchemy.engine.url.URL' to str in the process. This returns a
    # RFC-1738 quoted URL, which means that a password like "foo@" will be
    # turned into "foo%40". This in turns causes a problem for
    # set_main_option() because that uses ConfigParser.set, which (by design)
    # uses *python* interpolation to write the string out ... where "%" is the
    # special python interpolation character! Avoid this mismatch by quoting
    # all %'s for the set below.
    engine_url = str(engine.url).replace('%', '%%')
    config.set_main_option('sqlalchemy.url', str(engine_url))
    LOG.info('Applying migration(s)')
    _upgrade_alembic(engine, config, version)
    LOG.info('Migration(s) applied')
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/ 0000775 0000000 0000000 00000000000 15131732575 0022375 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0024474 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/env.py 0000664 0000000 0000000 00000006456 15131732575 0023552 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from cinder.db.sqlalchemy import models
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging unless we're told not to.
# This line sets up loggers basically.
if config.attributes.get('configure_logger', True):
fileConfig(config.config_file_name)
target_metadata = models.BASE.metadata
def include_name(name, type_, parent_names):
    """Autogenerate filter hook; nothing is currently excluded.

    Should any tables or columns need to be excluded from alembic
    auto-generation in the future, the exclusion logic belongs here.
    """
    return True
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine, though an
    Engine is acceptable here as well. By skipping the Engine creation we
    don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the script
    output.
    """
    url = config.get_main_option("sqlalchemy.url")
    # literal_binds renders bound parameters inline so the emitted SQL script
    # is self-contained and can be run without parameter substitution.
    context.configure(
        url=url,
        target_metadata=target_metadata,
        include_name=include_name,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine and associate a connection
    with the context.

    This is modified from the default based on the below, since we want to
    share an engine when unit testing so in-memory database testing actually
    works.

    https://alembic.sqlalchemy.org/en/latest/cookbook.html#connection-sharing
    """
    # The caller (e.g. cinder's db_sync) may have stashed an existing
    # connection in config.attributes; re-use it if so.
    connectable = config.attributes.get('connection', None)
    if connectable is None:
        # only create Engine if we don't have a Connection from the outside
        connectable = engine_from_config(
            config.get_section(config.config_ini_section),
            prefix="sqlalchemy.",
            poolclass=pool.NullPool,
        )
        with connectable.connect() as connection:
            context.configure(
                connection=connection,
                target_metadata=target_metadata,
                include_name=include_name,
            )
            with context.begin_transaction():
                context.run_migrations()
    else:
        # An externally-provided connection: configure directly on it and
        # leave its lifecycle to the caller.
        context.configure(
            connection=connectable,
            target_metadata=target_metadata,
            include_name=include_name,
        )
        with context.begin_transaction():
            context.run_migrations()
# Alembic invokes this script with the migration context already prepared;
# dispatch on whether we are emitting SQL (offline) or applying directly.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/script.py.mako 0000664 0000000 0000000 00000001720 15131732575 0025201 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions/ 0000775 0000000 0000000 00000000000 15131732575 0024245 5 ustar 00root root 0000000 0000000 89aa6f9639f9_drop_legacy_migrate_version_table.py 0000664 0000000 0000000 00000002105 15131732575 0035322 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Drop legacy migrate_version table
Revision ID: 89aa6f9639f9
Revises: daa98075b90d
Create Date: 2023-02-17 12:41:37.940769
"""
from alembic import op
from sqlalchemy.engine import reflection
# revision identifiers, used by Alembic.
revision = '89aa6f9639f9'
down_revision = 'daa98075b90d'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the legacy sqlalchemy-migrate bookkeeping table, if present.

    The ``migrate_version`` table belonged to the retired sqlalchemy-migrate
    tooling and is no longer consulted now that alembic owns migrations.
    """
    # Function-local import: sqlalchemy is already a dependency of this file
    # (see the module-level reflection import), but we need the top-level
    # inspect() helper here.
    from sqlalchemy import inspect as sa_inspect

    conn = op.get_bind()
    # Inspector.from_engine() is deprecated and removed in SQLAlchemy 2.0;
    # sqlalchemy.inspect() is the supported replacement and accepts a
    # Connection directly.
    inspector = sa_inspect(conn)
    if 'migrate_version' in inspector.get_table_names():
        op.drop_table('migrate_version')
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions/921e1a36b076_initial.py 0000664 0000000 0000000 00000117362 15131732575 0030014 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial migration.
Revision ID: 921e1a36b076
Revises:
Create Date: 2020-11-02 11:27:29.952490
"""
import datetime
import uuid
from alembic import op
from oslo_config import cfg
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.sql import expression
from cinder.volume import group_types as volume_group_types
from cinder.volume import volume_types
# revision identifiers, used by Alembic.
revision = '921e1a36b076'
down_revision = None
branch_labels = None
depends_on = None
# Get default values via config. The defaults will either
# come from the default values set in the quota option
# configuration or via cinder.conf if the user has configured
# default values for quotas there.
CONF = cfg.CONF
CONF.import_opt('quota_volumes', 'cinder.quota')
CONF.import_opt('quota_snapshots', 'cinder.quota')
CONF.import_opt('quota_gigabytes', 'cinder.quota')
CLASS_NAME = 'default'
CREATED_AT = datetime.datetime.now() # noqa
def upgrade():
    """Create the initial cinder schema and seed its default rows.

    Creates every table cinder uses (services, volumes, snapshots,
    backups, quotas, groups, clusters, workers, ...), applies the
    MySQL-specific charset/engine fixups, widens quota_usages.resource
    where the backend allows it, and seeds:

    * the default quota class limits,
    * the workers sub-second-resolution sentinel row,
    * the default group type (for cgsnapshot migration), and
    * the default volume type.
    """
    connection = op.get_bind()

    op.create_table(
        'services',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('host', sa.String(255)),
        sa.Column('binary', sa.String(255)),
        sa.Column('topic', sa.String(255)),
        sa.Column('report_count', sa.Integer, nullable=False),
        sa.Column('disabled', sa.Boolean),
        sa.Column('availability_zone', sa.String(255)),
        sa.Column('disabled_reason', sa.String(255)),
        sa.Column('modified_at', sa.DateTime(timezone=False)),
        sa.Column('rpc_current_version', sa.String(36)),
        sa.Column('object_current_version', sa.String(36)),
        sa.Column('replication_status', sa.String(36), default='not-capable'),
        sa.Column('frozen', sa.Boolean, default=False),
        sa.Column('active_backend_id', sa.String(255)),
        sa.Column('cluster_name', sa.String(255), nullable=True),
        sa.Column('uuid', sa.String(36), nullable=True),
        sa.Index('services_uuid_idx', 'uuid', unique=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'consistencygroups',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('user_id', sa.String(255)),
        sa.Column('project_id', sa.String(255)),
        sa.Column('host', sa.String(255)),
        sa.Column('availability_zone', sa.String(255)),
        sa.Column('name', sa.String(255)),
        sa.Column('description', sa.String(255)),
        sa.Column('volume_type_id', sa.String(255)),
        sa.Column('status', sa.String(255)),
        sa.Column('cgsnapshot_id', sa.String(36)),
        sa.Column('source_cgid', sa.String(36)),
        sa.Column('cluster_name', sa.String(255), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'cgsnapshots',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column(
            'consistencygroup_id',
            sa.String(36),
            sa.ForeignKey('consistencygroups.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('user_id', sa.String(255)),
        sa.Column('project_id', sa.String(255)),
        sa.Column('name', sa.String(255)),
        sa.Column('description', sa.String(255)),
        sa.Column('status', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'groups',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('user_id', sa.String(length=255)),
        sa.Column('project_id', sa.String(length=255)),
        sa.Column('cluster_name', sa.String(255)),
        sa.Column('host', sa.String(length=255)),
        sa.Column('availability_zone', sa.String(length=255)),
        sa.Column('name', sa.String(length=255)),
        sa.Column('description', sa.String(length=255)),
        sa.Column('group_type_id', sa.String(length=36)),
        sa.Column('status', sa.String(length=255)),
        sa.Column('group_snapshot_id', sa.String(36)),
        sa.Column('source_group_id', sa.String(36)),
        sa.Column('replication_status', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'group_snapshots',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column(
            'group_id',
            sa.String(36),
            sa.ForeignKey('groups.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('user_id', sa.String(length=255)),
        sa.Column('project_id', sa.String(length=255)),
        sa.Column('name', sa.String(length=255)),
        sa.Column('description', sa.String(length=255)),
        sa.Column('status', sa.String(length=255)),
        sa.Column('group_type_id', sa.String(length=36)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'volumes',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('ec2_id', sa.String(255)),
        sa.Column('user_id', sa.String(255)),
        sa.Column('project_id', sa.String(255)),
        sa.Column('host', sa.String(255)),
        sa.Column('size', sa.Integer),
        sa.Column('availability_zone', sa.String(255)),
        sa.Column('status', sa.String(255)),
        sa.Column('attach_status', sa.String(255)),
        sa.Column('scheduled_at', sa.DateTime),
        sa.Column('launched_at', sa.DateTime),
        sa.Column('terminated_at', sa.DateTime),
        sa.Column('display_name', sa.String(255)),
        sa.Column('display_description', sa.String(255)),
        sa.Column('provider_location', sa.String(256)),
        sa.Column('provider_auth', sa.String(256)),
        sa.Column('snapshot_id', sa.String(36)),
        sa.Column('volume_type_id', sa.String(36), nullable=False),
        sa.Column('source_volid', sa.String(36)),
        sa.Column('bootable', sa.Boolean),
        sa.Column('provider_geometry', sa.String(255)),
        sa.Column('_name_id', sa.String(36)),
        sa.Column('encryption_key_id', sa.String(36)),
        sa.Column('migration_status', sa.String(255)),
        sa.Column('replication_status', sa.String(255)),
        sa.Column('replication_extended_status', sa.String(255)),
        sa.Column('replication_driver_data', sa.String(255)),
        sa.Column(
            'consistencygroup_id',
            sa.String(36),
            sa.ForeignKey('consistencygroups.id'),
            index=True,
        ),
        sa.Column('provider_id', sa.String(255)),
        sa.Column('multiattach', sa.Boolean),
        sa.Column('previous_status', sa.String(255)),
        sa.Column('cluster_name', sa.String(255), nullable=True),
        sa.Column(
            'group_id',
            sa.String(36),
            sa.ForeignKey('groups.id'),
            index=True,
        ),
        sa.Column(
            'service_uuid',
            sa.String(36),
            sa.ForeignKey('services.uuid'),
            nullable=True,
        ),
        sa.Column('shared_targets', sa.Boolean, default=True),
        sa.Column('use_quota', sa.Boolean, nullable=True),
        sa.Index('volumes_service_uuid_idx', 'service_uuid', 'deleted'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'volume_attachment',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column(
            'volume_id',
            sa.String(36),
            sa.ForeignKey('volumes.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('attached_host', sa.String(255)),
        sa.Column('instance_uuid', sa.String(36)),
        sa.Column('mountpoint', sa.String(255)),
        sa.Column('attach_time', sa.DateTime),
        sa.Column('detach_time', sa.DateTime),
        sa.Column('attach_mode', sa.String(36)),
        sa.Column('attach_status', sa.String(255)),
        sa.Column('connection_info', sa.Text),
        sa.Column('connector', sa.Text),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'attachment_specs',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(), default=False),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column(
            'attachment_id',
            sa.String(36),
            sa.ForeignKey('volume_attachment.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'snapshots',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column(
            'volume_id',
            sa.String(36),
            sa.ForeignKey('volumes.id', name='snapshots_volume_id_fkey'),
            nullable=False,
            index=True,
        ),
        sa.Column('user_id', sa.String(255)),
        sa.Column('project_id', sa.String(255)),
        sa.Column('status', sa.String(255)),
        sa.Column('progress', sa.String(255)),
        sa.Column('volume_size', sa.Integer),
        sa.Column('scheduled_at', sa.DateTime),
        sa.Column('display_name', sa.String(255)),
        sa.Column('display_description', sa.String(255)),
        sa.Column('provider_location', sa.String(255)),
        sa.Column('encryption_key_id', sa.String(36)),
        sa.Column('volume_type_id', sa.String(36), nullable=False),
        sa.Column(
            'cgsnapshot_id',
            sa.String(36),
            sa.ForeignKey('cgsnapshots.id'),
            index=True,
        ),
        sa.Column('provider_id', sa.String(255)),
        sa.Column('provider_auth', sa.String(255)),
        sa.Column(
            'group_snapshot_id',
            sa.String(36),
            sa.ForeignKey('group_snapshots.id'),
            index=True,
        ),
        sa.Column('use_quota', sa.Boolean, nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'snapshot_metadata',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column(
            'snapshot_id',
            sa.String(36),
            sa.ForeignKey('snapshots.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    # Self-referential FK: nested qos specs point at their parent spec row.
    op.create_table(
        'quality_of_service_specs',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column(
            'specs_id',
            sa.String(36),
            sa.ForeignKey('quality_of_service_specs.id'),
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    # Kept as a variable: the default volume type is inserted below.
    volume_types_table = op.create_table(
        'volume_types',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('name', sa.String(255)),
        sa.Column(
            'qos_specs_id',
            sa.String(36),
            sa.ForeignKey('quality_of_service_specs.id'),
            index=True,
        ),
        sa.Column('is_public', sa.Boolean),
        sa.Column('description', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'volume_type_projects',
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column(
            'volume_type_id', sa.String(36), sa.ForeignKey('volume_types.id')
        ),
        sa.Column('project_id', sa.String(255)),
        sa.Column('deleted', sa.Integer),
        sa.UniqueConstraint('volume_type_id', 'project_id', 'deleted'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'volume_metadata',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column(
            'volume_id',
            sa.String(36),
            sa.ForeignKey('volumes.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'volume_type_extra_specs',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column(
            'volume_type_id',
            sa.String(36),
            sa.ForeignKey(
                'volume_types.id',
                name='volume_type_extra_specs_ibfk_1',
            ),
            nullable=False,
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'quotas',
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('project_id', sa.String(255)),
        sa.Column('resource', sa.String(255), nullable=False),
        sa.Column('hard_limit', sa.Integer),
        sa.Column('allocated', sa.Integer, default=0),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    # Kept as a variable: the default quota class rows are inserted below.
    quota_classes_table = op.create_table(
        'quota_classes',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.Integer(), primary_key=True),
        sa.Column('class_name', sa.String(255), index=True),
        sa.Column('resource', sa.String(255)),
        sa.Column('hard_limit', sa.Integer(), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'quota_usages',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.Integer(), primary_key=True),
        sa.Column('project_id', sa.String(255), index=True),
        sa.Column('resource', sa.String(255)),
        sa.Column('in_use', sa.Integer(), nullable=False),
        sa.Column('reserved', sa.Integer(), nullable=False),
        sa.Column('until_refresh', sa.Integer(), nullable=True),
        sa.Column('race_preventer', sa.Boolean, nullable=True),
        sa.Index('quota_usage_project_resource_idx', 'project_id', 'resource'),
        sa.UniqueConstraint('project_id', 'resource', 'race_preventer'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'reservations',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.Integer(), primary_key=True),
        sa.Column('uuid', sa.String(36), nullable=False),
        sa.Column(
            'usage_id',
            sa.Integer(),
            sa.ForeignKey('quota_usages.id'),
            nullable=True,
            index=True,
        ),
        sa.Column('project_id', sa.String(255), index=True),
        sa.Column('resource', sa.String(255)),
        sa.Column('delta', sa.Integer(), nullable=False),
        sa.Column('expire', sa.DateTime(timezone=False)),
        sa.Column(
            'allocated_id',
            sa.Integer,
            sa.ForeignKey('quotas.id'),
            nullable=True,
            index=True,
        ),
        sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
        sa.Index('reservations_deleted_uuid_idx', 'deleted', 'uuid'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'volume_glance_metadata',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.Integer(), primary_key=True, nullable=False),
        sa.Column(
            'volume_id',
            sa.String(36),
            sa.ForeignKey('volumes.id'),
            index=True,
        ),
        sa.Column(
            'snapshot_id',
            sa.String(36),
            sa.ForeignKey('snapshots.id'),
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.Text),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'backups',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('volume_id', sa.String(36), nullable=False),
        sa.Column('user_id', sa.String(255)),
        sa.Column('project_id', sa.String(255)),
        sa.Column('host', sa.String(255)),
        sa.Column('availability_zone', sa.String(255)),
        sa.Column('display_name', sa.String(255)),
        sa.Column('display_description', sa.String(255)),
        sa.Column('container', sa.String(255)),
        sa.Column('status', sa.String(255)),
        sa.Column('fail_reason', sa.String(255)),
        sa.Column('service_metadata', sa.String(255)),
        sa.Column('service', sa.String(255)),
        sa.Column('size', sa.Integer()),
        sa.Column('object_count', sa.Integer()),
        sa.Column('parent_id', sa.String(36)),
        sa.Column('temp_volume_id', sa.String(36)),
        sa.Column('temp_snapshot_id', sa.String(36)),
        sa.Column('num_dependent_backups', sa.Integer, default=0),
        sa.Column('snapshot_id', sa.String(36)),
        sa.Column('data_timestamp', sa.DateTime),
        sa.Column('restore_volume_id', sa.String(36)),
        sa.Column('encryption_key_id', sa.String(36)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'backup_metadata',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(), default=False),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column(
            'backup_id',
            sa.String(36),
            sa.ForeignKey('backups.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'transfers',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column(
            'volume_id',
            sa.String(36),
            sa.ForeignKey('volumes.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('display_name', sa.String(255)),
        sa.Column('salt', sa.String(255)),
        sa.Column('crypt_hash', sa.String(255)),
        sa.Column('expires_at', sa.DateTime(timezone=False)),
        sa.Column('no_snapshots', sa.Boolean, default=False),
        sa.Column('source_project_id', sa.String(255), nullable=True),
        sa.Column('destination_project_id', sa.String(255), nullable=True),
        sa.Column('accepted', sa.Boolean, default=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    # Sqlite needs to handle nullable differently
    is_nullable = connection.engine.name == 'sqlite'

    op.create_table(
        'encryption',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.Column('cipher', sa.String(255)),
        sa.Column('control_location', sa.String(255), nullable=is_nullable),
        sa.Column('key_size', sa.Integer),
        sa.Column('provider', sa.String(255), nullable=is_nullable),
        # NOTE(joel-coffman): The volume_type_id must be unique or else the
        # referenced volume type becomes ambiguous. That is, specifying the
        # volume type is not sufficient to identify a particular encryption
        # scheme unless each volume type is associated with at most one
        # encryption scheme.
        sa.Column('volume_type_id', sa.String(36), nullable=False),
        # NOTE (smcginnis): nullable=True triggers this to not set a default
        # value, but since it's a primary key the resulting schema will end up
        # still being NOT NULL. This is avoiding a case in MySQL where it will
        # otherwise set this to NOT NULL DEFAULT ''. May be harmless, but
        # inconsistent with previous schema.
        sa.Column(
            'encryption_id',
            sa.String(36),
            primary_key=True,
            nullable=True,
        ),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'volume_admin_metadata',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column(
            'volume_id',
            sa.String(36),
            sa.ForeignKey('volumes.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'driver_initiator_data',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('initiator', sa.String(255), index=True, nullable=False),
        sa.Column('namespace', sa.String(255), nullable=False),
        sa.Column('key', sa.String(255), nullable=False),
        sa.Column('value', sa.String(255)),
        sa.UniqueConstraint('initiator', 'namespace', 'key'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'image_volume_cache_entries',
        sa.Column('image_updated_at', sa.DateTime(timezone=False)),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('host', sa.String(255), index=True, nullable=False),
        sa.Column('image_id', sa.String(36), index=True, nullable=False),
        sa.Column('volume_id', sa.String(36), nullable=False),
        sa.Column('size', sa.Integer, nullable=False),
        sa.Column('last_used', sa.DateTime, nullable=False),
        sa.Column('cluster_name', sa.String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'messages',
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('project_id', sa.String(255), nullable=False),
        sa.Column('request_id', sa.String(255)),
        sa.Column('resource_type', sa.String(36)),
        sa.Column('resource_uuid', sa.String(255), nullable=True),
        sa.Column('event_id', sa.String(255), nullable=False),
        sa.Column('message_level', sa.String(255), nullable=False),
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean),
        sa.Column('expires_at', sa.DateTime(timezone=False), index=True),
        sa.Column('detail_id', sa.String(10), nullable=True),
        sa.Column('action_id', sa.String(10), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'clusters',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(), default=False),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('name', sa.String(255), nullable=False),
        sa.Column('binary', sa.String(255), nullable=False),
        sa.Column('disabled', sa.Boolean(), default=False),
        sa.Column('disabled_reason', sa.String(255)),
        sa.Column('race_preventer', sa.Integer, nullable=False, default=0),
        sa.Column(
            'replication_status',
            sa.String(length=36),
            default='not-capable',
        ),
        sa.Column('active_backend_id', sa.String(length=255)),
        sa.Column(
            'frozen',
            sa.Boolean,
            nullable=False,
            default=False,
            server_default=expression.false(),
        ),
        # To remove potential races on creation we have a constraint set on
        # name and race_preventer fields, and we set value on creation to 0, so
        # 2 clusters with the same name will fail this constraint. On deletion
        # we change this field to the same value as the id which will be unique
        # and will not conflict with the creation of another cluster with the
        # same name.
        sa.UniqueConstraint('name', 'binary', 'race_preventer'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    # Kept as a variable: the sub-second sentinel row is inserted below.
    workers_table = op.create_table(
        'workers',
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean(), default=False),
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('resource_type', sa.String(40), nullable=False),
        sa.Column('resource_id', sa.String(36), nullable=False),
        sa.Column('status', sa.String(255), nullable=False),
        sa.Column(
            'service_id',
            sa.Integer,
            sa.ForeignKey('services.id'),
            nullable=True,
            index=True,
        ),
        sa.Column(
            'race_preventer',
            sa.Integer,
            nullable=False,
            default=0,
            server_default=sa.text('0'),
        ),
        sa.UniqueConstraint('resource_type', 'resource_id'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    group_types_table = op.create_table(
        'group_types',
        sa.Column('id', sa.String(36), primary_key=True, nullable=False),
        sa.Column('name', sa.String(255), nullable=False),
        sa.Column('description', sa.String(255)),
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean),
        sa.Column('is_public', sa.Boolean),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    group_type_specs_table = op.create_table(
        'group_type_specs',
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('key', sa.String(255)),
        sa.Column('value', sa.String(255)),
        sa.Column(
            'group_type_id',
            sa.String(36),
            sa.ForeignKey('group_types.id'),
            nullable=False,
            index=True,
        ),
        sa.Column('created_at', sa.DateTime(timezone=False)),
        sa.Column('updated_at', sa.DateTime(timezone=False)),
        sa.Column('deleted_at', sa.DateTime(timezone=False)),
        sa.Column('deleted', sa.Boolean),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'group_type_projects',
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column(
            'group_type_id', sa.String(36), sa.ForeignKey('group_types.id')
        ),
        sa.Column('project_id', sa.String(length=255)),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        sa.UniqueConstraint('group_type_id', 'project_id', 'deleted'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'group_volume_type_mapping',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column(
            'volume_type_id',
            sa.String(36),
            sa.ForeignKey('volume_types.id'),
            nullable=False,
            index=True,
        ),
        sa.Column(
            'group_id',
            sa.String(36),
            sa.ForeignKey('groups.id'),
            nullable=False,
            index=True,
        ),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    op.create_table(
        'default_volume_types',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column(
            'volume_type_id',
            sa.String(36),
            sa.ForeignKey('volume_types.id'),
            index=True,
        ),
        sa.Column(
            'project_id',
            sa.String(length=255),
            primary_key=True,
            nullable=False,
        ),
        sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    if connection.engine.name == "mysql":
        tables = [
            "consistencygroups",
            "cgsnapshots",
            "snapshots",
            "snapshot_metadata",
            "quality_of_service_specs",
            "volume_types",
            "volume_type_projects",
            "volumes",
            "volume_attachment",
            "quotas",
            "services",
            "volume_metadata",
            "volume_type_extra_specs",
            "quota_classes",
            "quota_usages",
            "reservations",
            "volume_glance_metadata",
            "backups",
            "backup_metadata",
            "transfers",
            "encryption",
            "volume_admin_metadata",
            "driver_initiator_data",
            "image_volume_cache_entries",
        ]

        # Disable FK checks while converting charsets so interdependent
        # tables can be altered in any order.
        op.execute("SET foreign_key_checks = 0")
        for table in tables:
            op.execute(
                "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table
            )
        op.execute("SET foreign_key_checks = 1")
        op.execute(
            "ALTER DATABASE %s DEFAULT CHARACTER SET utf8"
            % connection.engine.url.database
        )
        # NOTE: previously this statement sat outside any loop and reused the
        # stale loop variable, so only the *last* table in the list was
        # converted to InnoDB. Apply it to every table; it is a no-op for
        # tables already created with mysql_engine='InnoDB'.
        for table in tables:
            op.execute("ALTER TABLE %s Engine=InnoDB" % table)

    # This is only necessary for mysql, and since the table is not in use this
    # will only be a schema update.
    if connection.engine.name.startswith('mysql'):
        try:
            with op.batch_alter_table('workers') as batch_op:
                batch_op.alter_column(
                    'updated_at', type_=mysql.DATETIME(fsp=6)
                )
        except Exception:
            # MySQL v5.5 or earlier don't support sub-second resolution so we
            # may have cleanup races in Active-Active configurations, that's
            # why upgrading is recommended in that case.
            # Code in Cinder is capable of working with 5.5, so for 5.5 there's
            # no problem
            pass

    # Increase the resource column size to the quota_usages table.
    #
    # The resource value is constructed from (prefix + volume_type_name),
    # but the length of volume_type_name is limited to 255, if we add a
    # prefix such as 'volumes_' or 'gigabytes_' to volume_type_name it
    # will exceed the db length limit.
    try:
        with op.batch_alter_table('quota_usages') as batch_op:
            batch_op.alter_column('resource', type_=sa.String(300))
    except Exception:
        # On MariaDB, max length varies depending on the version and the InnoDB
        # page size [1], so it is possible to have error 1071 ('Specified key
        # was too long; max key length is 767 bytes"). Since this migration is
        # to resolve a corner case, deployments with those DB versions won't be
        # covered.
        # [1]: https://mariadb.com/kb/en/library/innodb-limitations/#page-sizes
        if not connection.engine.name.startswith('mysql'):
            raise

    op.bulk_insert(
        quota_classes_table,
        [
            # Set default quota class values
            {
                'created_at': CREATED_AT,
                'class_name': CLASS_NAME,
                'resource': 'volumes',
                'hard_limit': CONF.quota_volumes,
                'deleted': False,
            },
            {
                'created_at': CREATED_AT,
                'class_name': CLASS_NAME,
                'resource': 'snapshots',
                'hard_limit': CONF.quota_snapshots,
                'deleted': False,
            },
            # Set default gigabytes
            {
                'created_at': CREATED_AT,
                'class_name': CLASS_NAME,
                'resource': 'gigabytes',
                'hard_limit': CONF.quota_gigabytes,
                'deleted': False,
            },
            {
                'created_at': CREATED_AT,
                'class_name': CLASS_NAME,
                'resource': 'per_volume_gigabytes',
                'hard_limit': -1,
                'deleted': False,
            },
            {
                'created_at': CREATED_AT,
                'class_name': CLASS_NAME,
                'resource': 'groups',
                'hard_limit': CONF.quota_groups,
                'deleted': False,
            },
        ],
    )

    # TODO(geguileo): Once we remove support for MySQL 5.5 we have to create
    # an upgrade migration to remove this row.
    # Set workers table sub-second support sentinel
    now = timeutils.utcnow().replace(microsecond=123)
    op.bulk_insert(
        workers_table,
        [
            {
                'created_at': now,
                'updated_at': now,
                'deleted': False,
                'resource_type': 'SENTINEL',
                'resource_id': 'SUB-SECOND',
                'status': 'OK',
            },
        ],
    )

    # Create default group type
    now = timeutils.utcnow()
    grp_type_id = "%s" % uuid.uuid4()
    op.bulk_insert(
        group_types_table,
        [
            {
                'id': grp_type_id,
                'name': volume_group_types.DEFAULT_CGSNAPSHOT_TYPE,
                'description': 'Default group type for migrating cgsnapshot',
                'created_at': now,
                'updated_at': now,
                'deleted': False,
                'is_public': True,
            },
        ],
    )
    op.bulk_insert(
        group_type_specs_table,
        [
            {
                'key': 'consistent_group_snapshot_enabled',
                'value': ' True',
                'group_type_id': grp_type_id,
                'created_at': now,
                'updated_at': now,
                'deleted': False,
            },
        ],
    )

    # Create default volume type
    op.bulk_insert(
        volume_types_table,
        [
            {
                'id': str(uuid.uuid4()),
                'name': volume_types.DEFAULT_VOLUME_TYPE,
                'description': 'Default Volume Type',
                'created_at': now,
                'updated_at': now,
                'deleted': False,
                'is_public': True,
            },
        ],
    )
9ab1b092a404_make_use_quota_non_nullable.py 0000664 0000000 0000000 00000003752 15131732575 0034103 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Make use_quota non nullable
Revision ID: 9ab1b092a404
Revises: b8660621f1b9
Create Date: 2021-10-22 16:23:17.080934
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9ab1b092a404'
down_revision = 'b8660621f1b9'
branch_labels = None
depends_on = None
def upgrade():
    """Make the use_quota column of volumes and snapshots NOT NULL.

    It's safe to set the columns as non nullable because when we run db
    sync on this release the online migrations from the previous release
    must already have been run.
    """
    column_kwargs = {
        'existing_type': sa.BOOLEAN,
        'nullable': False,
        'server_default': sa.true(),
    }
    use_batch_mode = op.get_bind().engine.name == 'sqlite'
    for table_name in ('volumes', 'snapshots'):
        if use_batch_mode:
            # SQLite doesn't support dropping/altering tables, so batch
            # mode is used as a workaround (it rebuilds the table).
            with op.batch_alter_table(table_name) as batch_op:
                batch_op.alter_column('use_quota', **column_kwargs)
        else:
            op.alter_column(table_name, 'use_quota', **column_kwargs)
9c74c1c6971f_quota_add_backup_defaults_in_quota_class.py 0000664 0000000 0000000 00000004000 15131732575 0036625 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quota: Add backup defaults in quota class
Revision ID: 9c74c1c6971f
Revises: b7b88f50aab5
Create Date: 2021-11-10 12:17:06.713239
"""
from datetime import datetime
from alembic import op
from oslo_config import cfg
import sqlalchemy as sa
from cinder.db.sqlalchemy import models
# revision identifiers, used by Alembic.
revision = '9c74c1c6971f'
down_revision = 'b7b88f50aab5'
branch_labels = None
depends_on = None
def _create_default(bind, resource, hard_limit):
    """Insert a 'default' quota class row for *resource* if none exists.

    :param bind: DB connection/engine to run against.
    :param resource: quota resource name (e.g. 'backups').
    :param hard_limit: default hard limit to store.
    """
    session = sa.orm.Session(bind=bind)
    class_name = 'default'
    created_at = datetime.now()  # noqa
    with session.begin():
        # Don't insert a duplicate when a non-deleted default entry for
        # this resource is already present.
        already_present = session.query(
            sa.sql.exists().where(
                sa.and_(
                    ~models.QuotaClass.deleted,
                    models.QuotaClass.class_name == class_name,
                    models.QuotaClass.resource == resource,
                )
            )
        ).scalar()
        if already_present:
            return
        session.add(
            models.QuotaClass(
                created_at=created_at,
                class_name=class_name,
                resource=resource,
                hard_limit=hard_limit,
                deleted=False,
            )
        )
def upgrade():
    """Seed backup quota defaults into the default quota class."""
    bind = op.get_bind()
    defaults = (
        ('backups', cfg.CONF.quota_backups),
        ('backup_gigabytes', cfg.CONF.quota_backup_gigabytes),
    )
    for resource, hard_limit in defaults:
        _create_default(bind, resource, hard_limit)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0026344 0 ustar 00root root 0000000 0000000 b7b88f50aab5_remove_quota_consistencygroups.py 0000664 0000000 0000000 00000002455 15131732575 0035113 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove quota consistencygroups
Revision ID: b7b88f50aab5
Revises: 9ab1b092a404
Create Date: 2021-11-10 11:54:50.123389
"""
from alembic import op
from sqlalchemy import orm
from cinder.db.sqlalchemy import models
# revision identifiers, used by Alembic.
revision = 'b7b88f50aab5'
down_revision = '9ab1b092a404'
branch_labels = None
depends_on = None
def upgrade():
    """Soft-delete all quota records for the consistencygroups resource."""
    session = orm.Session(bind=op.get_bind())
    quota_models = (
        models.QuotaClass,
        models.Quota,
        models.QuotaUsage,
        models.Reservation,
    )
    with session.begin():
        for quota_model in quota_models:
            (session.query(quota_model)
             .filter_by(deleted=False, resource='consistencygroups')
             .update(quota_model.delete_values()))
b8660621f1b9_update_reservations_resource.py 0000664 0000000 0000000 00000005246 15131732575 0034304 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Update reservations resource
Revision ID: b8660621f1b9
Revises: 89aa6f9639f9
Create Date: 2021-10-27 17:25:16.790525
"""
from alembic import op
from oslo_log import log as logging
import sqlalchemy as sa
LOG = logging.getLogger(__name__)
# revision identifiers, used by Alembic.
revision = 'b8660621f1b9'
down_revision = '89aa6f9639f9'
branch_labels = None
depends_on = None
def upgrade():
    """Alter the resource column of the quota tables to String(300).

    Applies to the quotas, quota_classes and reservations tables.
    """
    connection = op.get_bind()
    for table_name in ('quotas', 'quota_classes', 'reservations'):
        # Reflect the live table so the column's current type, nullability
        # and default can be handed to ALTER.
        table = sa.Table(table_name, sa.MetaData(), autoload_with=connection)
        col = table.c.resource
        # SQLite doesn't support altering tables, so we use a workaround
        if connection.engine.name == 'sqlite':
            with op.batch_alter_table(table_name) as batch_op:
                batch_op.alter_column('resource',
                                      existing_type=col.type,
                                      type_=sa.String(length=300))
        else:
            # MySQL ALTER needs to have existing_type, existing_server_default,
            # and existing_nullable or it will do who-knows-what
            try:
                op.alter_column(table_name, 'resource',
                                existing_type=col.type,
                                existing_nullable=col.nullable,
                                existing_server_default=col.server_default,
                                type_=sa.String(length=300))
            except Exception:
                # On MariaDB, max length varies depending on the version and
                # the InnoDB page size [1], so it is possible to have error
                # 1071 ('Specified key was too long; max key length is 767
                # bytes"). Since this migration is to resolve a corner case,
                # deployments with those DB versions won't be covered.
                # [1]: https://mariadb.com/kb/en/library/innodb-limitations/#page-sizes # noqa
                if not connection.engine.name == 'mysql':
                    raise
                LOG.warning('Error in migration %s, Cinder still affected by '
                            'bug #1948962', revision)
c92a3e68beed_make_shared_targets_nullable.py 0000664 0000000 0000000 00000003232 15131732575 0034456 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Make shared_targets nullable
Revision ID: c92a3e68beed
Revises: 921e1a36b076
Create Date: 2022-03-23 21:30:18.585830
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c92a3e68beed'
down_revision = '921e1a36b076'
branch_labels = None
depends_on = None
def upgrade():
    """Allow NULL values in the volumes.shared_targets column."""
    connection = op.get_bind()
    # Reflect the live table so the column's current type (boolean, or
    # tinyint treated as boolean) is preserved as existing_type.
    volumes = sa.Table('volumes', sa.MetaData(), autoload_with=connection)
    column_kwargs = {
        'existing_type': volumes.c.shared_targets.type,
        'type_': sa.Boolean(),
        'nullable': True,
    }
    # SQLite doesn't support altering tables, so batch mode is used as a
    # workaround (it rebuilds the table).
    if connection.engine.name == 'sqlite':
        with op.batch_alter_table('volumes') as batch_op:
            batch_op.alter_column('shared_targets', **column_kwargs)
    else:
        op.alter_column('volumes', 'shared_targets', **column_kwargs)
daa98075b90d_add_resource_indexes.py 0000664 0000000 0000000 00000003531 15131732575 0032621 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/migrations/versions # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add resource indexes
Revision ID: daa98075b90d
Revises: c92a3e68beed
Create Date: 2021-11-26 10:26:41.883072
"""
from alembic import op
from oslo_db.sqlalchemy import utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
# revision identifiers, used by Alembic.
revision = 'daa98075b90d'
down_revision = 'c92a3e68beed'
branch_labels = None
depends_on = None
# Indexes to create: (table name, index name, indexed columns) triples.
# Each index leads with `deleted` paired with a second filter column
# (project_id or host).
INDEXES = (
    ('groups', 'groups_deleted_project_id_idx', ('deleted', 'project_id')),
    ('group_snapshots', 'group_snapshots_deleted_project_id_idx',
     ('deleted', 'project_id')),
    ('volumes', 'volumes_deleted_project_id_idx', ('deleted', 'project_id')),
    ('volumes', 'volumes_deleted_host_idx', ('deleted', 'host')),
    ('backups', 'backups_deleted_project_id_idx', ('deleted', 'project_id')),
    ('snapshots', 'snapshots_deleted_project_id_idx', ('deleted',
                                                       'project_id')),
)
def upgrade():
    """Create the indexes listed in INDEXES, skipping MySQL duplicates."""
    connection = op.get_bind()
    on_mysql = connection.dialect.name == 'mysql'
    for table_name, index_name, columns in INDEXES:
        # Skip creation in mysql if it already has the index
        if on_mysql and utils.index_exists(connection, table_name,
                                           index_name):
            LOG.info('Skipping index %s, already exists', index_name)
            continue
        op.create_index(index_name, table_name, columns)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/sqlalchemy/ 0000775 0000000 0000000 00000000000 15131732575 0022363 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/sqlalchemy/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0024462 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/sqlalchemy/api.py 0000664 0000000 0000000 00001003733 15131732575 0023515 0 ustar 00root root 0000000 0000000 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the cinder.db namespace. Call these
functions from cinder.db namespace, not the cinder.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
import collections
from collections import abc
import datetime as dt
import functools
import itertools
import re
import sys
import uuid
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
import sqlalchemy as sa
from sqlalchemy import MetaData
from sqlalchemy import or_, and_
from sqlalchemy.orm import joinedload, undefer_group, load_only
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy import sql
from sqlalchemy.sql.expression import bindparam
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import true
from sqlalchemy.sql import func
from sqlalchemy.sql import sqltypes
from cinder.api import common
from cinder.common import sqlalchemyutils
from cinder import db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Map with cases where attach status differs from volume status
ATTACH_STATUS_MAP = {'attached': 'in-use', 'detached': 'available'}
# Default DB connection falls back to a local SQLite file.
options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite')
# enginefacade context manager providing reader/writer sessions for the
# decorated functions in this module.
main_context_manager = enginefacade.transaction_context()
def get_engine():
    """Return the writer engine from the main enginefacade context."""
    return main_context_manager.writer.get_engine()
def dispose_engine():
    """Dispose of the main engine's connection pool."""
    get_engine().dispose()
# Name used for the default quota class.
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
    """The backend is this module itself."""
    # sys.modules lookup returns the live module object for this file.
    return sys.modules[__name__]
def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if context:
        return context.is_admin
    raise exception.CinderException(
        'Use of empty request context is deprecated'
    )
def is_user_context(context):
    """Indicates if the request context is a normal user."""
    # A user context must be non-empty, non-admin, and carry both a
    # user_id and a project_id.
    return bool(
        context
        and not context.is_admin
        and context.user_id
        and context.project_id
    )
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    # Admin contexts are never restricted here.
    if not is_user_context(context):
        return
    if not context.project_id or context.project_id != project_id:
        raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    # Admin contexts are never restricted here.
    if not is_user_context(context):
        return
    if not context.user_id or context.user_id != user_id:
        raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    # Admin contexts are never restricted here.
    if not is_user_context(context):
        return
    if not context.quota_class or context.quota_class != class_name:
        raise exception.NotAuthorized()
def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.
    """
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if is_admin_context(args[0]):
            return f(*args, **kwargs)
        raise exception.AdminRequired()
    return inner
def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.

    The first argument to the wrapped function must be the context.
    """
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if is_admin_context(args[0]) or is_user_context(args[0]):
            return f(*args, **kwargs)
        raise exception.NotAuthorized()
    return inner
###################
@require_context
@main_context_manager.reader
def resource_exists(context, model, resource_id):
    """Return whether a row of *model* with the given id is visible.

    Visibility honors the context's read_deleted mode and, for user
    contexts, the project_id when the model has one.
    """
    conditions = [model.id == resource_id]
    # Match non deleted resources by the id
    if context.read_deleted == 'no':
        conditions.append(~model.deleted)
    # If the context is not admin we limit it to the context's project
    if is_user_context(context) and hasattr(model, 'project_id'):
        conditions.append(model.project_id == context.project_id)
    return context.session.query(
        sql.exists().where(and_(*conditions))
    ).scalar()
def require_volume_exists(f):
    """Decorator to require the specified volume to exist.

    Requires the wrapped function to use context and volume_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def inner(context, volume_id, *args, **kwargs):
        if resource_exists(context, models.Volume, volume_id):
            return f(context, volume_id, *args, **kwargs)
        raise exception.VolumeNotFound(volume_id=volume_id)
    return inner
def require_snapshot_exists(f):
    """Decorator to require the specified snapshot to exist.

    Requires the wrapped function to use context and snapshot_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def inner(context, snapshot_id, *args, **kwargs):
        if resource_exists(context, models.Snapshot, snapshot_id):
            return f(context, snapshot_id, *args, **kwargs)
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
    return inner
def require_backup_exists(f):
    """Decorator to require the specified backup to exist.

    Requires the wrapped function to use context and backup_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, backup_id, *args, **kwargs):
        if not resource_exists(context, models.Backup, backup_id):
            raise exception.BackupNotFound(backup_id=backup_id)
        return f(context, backup_id, *args, **kwargs)
    return wrapper
def require_qos_specs_exists(f):
    """Decorator to require the specified QoS specs to exist.

    Requires the wrapped function to use context and qos_specs_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, qos_specs_id, *args, **kwargs):
        if not resource_exists(
            context,
            models.QualityOfServiceSpecs,
            qos_specs_id,
        ):
            raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
        return f(context, qos_specs_id, *args, **kwargs)
    return wrapper
def handle_db_data_error(f):
    """Decorator translating DBDataError into cinder's Invalid exception."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except db_exc.DBDataError:
            msg = _('Error writing field to database')
            LOG.exception(msg)
            raise exception.Invalid(msg)
    return inner
def model_query(context, model, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: A request context to query under
    :param model: Model to query. Must be a subclass of ModelBase.
    :param args: Arguments to query. If None - model is used.
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
        query to match the context's project_id.
    """
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    query = context.session.query(model, *args)
    if read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'no':
        query = query.filter_by(deleted=False)
    elif read_deleted == 'only':
        query = query.filter_by(deleted=True)
    elif read_deleted == 'int_no':
        query = query.filter_by(deleted=0)
    else:
        raise Exception(
            _("Unrecognized read_deleted value '%s'") % read_deleted)
    if kwargs.get('project_only') and is_user_context(context):
        if model is models.VolumeAttachment:
            # NOTE(dulek): In case of VolumeAttachment, we need to join
            # `project_id` through `volume` relationship.
            query = query.filter(
                models.Volume.project_id == context.project_id)
        else:
            query = query.filter_by(project_id=context.project_id)
    return query
###################
def get_model_for_versioned_object(versioned_object):
    """Map a versioned object (or its name) to the matching ORM model."""
    name = (versioned_object if isinstance(versioned_object, str)
            else versioned_object.obj_name())
    # BackupImport is persisted in the Backup table.
    return models.Backup if name == 'BackupImport' else getattr(models, name)
def _get_get_method(model):
    """Return the getter function for an ORM model class.

    Most getters follow the convention of the model's CamelCase name
    converted to snake case with a '_get' suffix; the mapping below
    overrides the ones that don't.
    """
    exceptions = {
        models.ConsistencyGroup: consistencygroup_get,
        models.VolumeType: _volume_type_get_full,
        models.QualityOfServiceSpecs: qos_specs_get,
        models.GroupType: _group_type_get_full,
        models.CGSnapshot: cgsnapshot_get,
    }
    try:
        return exceptions[model]
    except KeyError:
        pass
    # General conversion: CamelCase -> snake_case, then append '_get'.
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__)
    snake_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
    return globals().get(snake_name + '_get')
# Cache of model class -> getter function resolved by _get_get_method.
_GET_METHODS = {}


@require_context
@main_context_manager.reader
def get_by_id(context, model, id, *args, **kwargs):
    """Fetch a resource by id using the model's getter, caching lookups."""
    method = _GET_METHODS.get(model)
    if not method:
        method = _GET_METHODS[model] = _get_get_method(model)
    return method(context, id, *args, **kwargs)
def condition_db_filter(model, field, value):
    """Create matching filter.

    If value is an iterable other than a string, any of the values is
    a valid match (OR), so we'll use SQL IN operator.

    If it's not an iterator == operator will be used.
    """
    orm_field = getattr(model, field)
    multi = isinstance(value, abc.Iterable) and not isinstance(value, str)
    if not multi:
        # Scalar values match with plain equality.
        return orm_field == value
    if None in value:
        # We cannot use in_ when one of the values is None
        return or_(orm_field == v for v in value)
    return orm_field.in_(value)
def condition_not_db_filter(model, field, value, auto_none=True):
    """Create non matching filter.

    If value is an iterable other than a string, any of the values is
    a valid match (OR), so we'll use SQL IN operator.

    If it's not an iterator == operator will be used.

    If auto_none is True then we'll consider NULL values as different as
    well, like we do in Python and not like SQL does.
    """
    result = ~condition_db_filter(model, field, value)  # pylint: disable=E1130
    # NOTE: the original condition also tested "iterable, non-string and
    # None not in value", but that disjunct implies `value is not None`,
    # so the whole check reduces to the simpler form below.
    if auto_none and value is not None:
        # SQL's NULL never compares equal/unequal, so explicitly treat
        # NULL rows as "different".
        orm_field = getattr(model, field)
        result = or_(result, orm_field.is_(None))
    return result
def is_orm_value(obj):
    """Check if object is an ORM field or expression."""
    orm_types = (
        sa.orm.attributes.InstrumentedAttribute,
        sa.sql.expression.ColumnElement,
    )
    return isinstance(obj, orm_types)
def _check_is_not_multitable(values, model):
    """Check that we don't try to do multitable updates.

    Since PostgreSQL doesn't support multitable updates we want to always
    fail if we have such a query in our code, even if with MySQL it would
    work.
    """
    used_models = set()
    for field in values:
        # Plain string keys refer to the target model; ORM attributes
        # carry their own owning class.
        if isinstance(field, str):
            used_models.add(model)
        elif isinstance(field, sa.orm.attributes.InstrumentedAttribute):
            used_models.add(field.class_)
        else:
            raise exception.ProgrammingError(
                reason=(
                    'DB Conditional update - Unknown field type, must be '
                    'string or ORM field.'
                ),
            )
    if len(used_models) > 1:
        raise exception.ProgrammingError(
            reason=(
                'DB Conditional update - Error in query, multitable '
                'updates are not supported.'
            ),
        )
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def _conditional_update(
    context,
    model,
    values,
    expected_values,
    filters=None,
    include_deleted='no',
    project_only=False,
    order=None,
):
    """Compare-and-swap conditional update SQLAlchemy implementation.

    :param context: request context supplying the DB session.
    :param model: ORM model whose rows may be updated.
    :param values: mapping of field name to new value; a value may be an
        ORM field/expression or a db.Case object.
    :param expected_values: mapping of field name to the expected value
        (or a db.Condition) rows must match to be updated.
    :param filters: extra SQLAlchemy clauses ANDed into the WHERE.
    :param include_deleted: read_deleted mode for the query.
    :param project_only: when truthy and the context is user-type,
        restrict to the context's project.
    :param order: iterable of field names whose SET order must be
        enforced (see ordering NOTE below).
    :returns: True if at least one row was updated, False otherwise.
    """
    _check_is_not_multitable(values, model)
    # Provided filters will become part of the where clause
    where_conds = list(filters) if filters else []
    # Build where conditions with operators ==, !=, NOT IN and IN
    for field, condition in expected_values.items():
        if not isinstance(condition, db.Condition):
            condition = db.Condition(condition, field)
        where_conds.append(condition.get_filter(model, field))
    # Create the query with the where clause
    query = model_query(
        context, model, read_deleted=include_deleted, project_only=project_only
    ).filter(*where_conds)
    # NOTE(geguileo): Some DBs' update method are order dependent, and they
    # behave differently depending on the order of the values, example on a
    # volume with 'available' status:
    #    UPDATE volumes SET previous_status=status, status='reyping'
    #        WHERE id='44f284f9-877d-4fce-9eb4-67a052410054';
    # Will result in a volume with 'retyping' status and 'available'
    # previous_status both on SQLite and MariaDB, but
    #    UPDATE volumes SET status='retyping', previous_status=status
    #        WHERE id='44f284f9-877d-4fce-9eb4-67a052410054';
    # Will yield the same result in SQLite but will result in a volume with
    # status and previous_status set to 'retyping' in MariaDB, which is not
    # what we want, so order must be taken into consideration.
    # Order for the update will be:
    #  1- Order specified in argument order
    #  2- Values that refer to other ORM field (simple and using operations,
    #     like size + 10)
    #  3- Values that use Case clause (since they may be using fields as well)
    #  4- All other values
    order = list(order) if order else tuple()
    orm_field_list = []
    case_list = []
    unordered_list = []
    for key, value in values.items():
        if isinstance(value, db.Case):
            value = sa.case(
                *value.whens,
                value=value.value,
                else_=value.else_,
            )
        if key in order:
            # pylint: disable=E1137; ("order" is known to be a list, here)
            order[order.index(key)] = (key, value)
            continue
        # NOTE(geguileo): Check Case first since it's a type of orm value
        if isinstance(value, sql.elements.Case):
            value_list = case_list
        elif is_orm_value(value):
            value_list = orm_field_list
        else:
            value_list = unordered_list
        value_list.append((key, value))
    update_args = {'synchronize_session': False}
    # If we don't have to enforce any kind of order just pass along the values
    # dictionary since it will be a little more efficient.
    if order or orm_field_list or case_list:
        # If we are doing an update with ordered parameters, we need to add
        # remaining values to the list
        values = itertools.chain(
            order, orm_field_list, case_list, unordered_list
        )
        # And we have to tell SQLAlchemy that we want to preserve the order
        update_args['update_args'] = {'preserve_parameter_order': True}
    # Return True if we were able to change any DB entry, False otherwise
    result = query.update(values, **update_args)
    return 0 != result
@require_context
@main_context_manager.writer
def conditional_update(
    context,
    model,
    values,
    expected_values,
    filters=None,
    include_deleted='no',
    project_only=False,
    order=None,
):
    """Compare-and-swap conditional update SQLAlchemy implementation.

    Public wrapper that runs :func:`_conditional_update` inside a writer
    transaction; see that function for parameter details.
    """
    return _conditional_update(
        context,
        model,
        values,
        expected_values,
        filters=filters,
        include_deleted=include_deleted,
        project_only=project_only,
        order=order,
    )
###################
def _sync_volumes(
    context,
    project_id,
    volume_type_id=None,
    volume_type_name=None,
):
    """Recount a project's volumes for quota sync, optionally per type."""
    count, _gigs = _volume_data_get_for_project(
        context,
        project_id,
        volume_type_id=volume_type_id,
    )
    key = 'volumes' + ('_' + volume_type_name if volume_type_name else '')
    return {key: count}
def _sync_snapshots(
    context,
    project_id,
    volume_type_id=None,
    volume_type_name=None,
):
    """Recount a project's snapshots for quota sync, optionally per type."""
    count, _gigs = _snapshot_data_get_for_project(
        context,
        project_id,
        volume_type_id=volume_type_id,
    )
    key = 'snapshots' + ('_' + volume_type_name if volume_type_name else '')
    return {key: count}
def _sync_backups(
    context,
    project_id,
    volume_type_id=None,
    volume_type_name=None,
):
    """Recount a project's backups for quota sync (not split per type)."""
    count, _gigs = _backup_data_get_for_project(
        context,
        project_id,
        volume_type_id=volume_type_id,
    )
    return {'backups': count}
def _sync_gigabytes(
    context,
    project_id,
    volume_type_id=None,
    volume_type_name=None,
):
    """Recount gigabytes used by volumes (plus snapshots unless exempt)."""
    key = 'gigabytes' + ('_' + volume_type_name if volume_type_name else '')
    _count, vol_gigs = _volume_data_get_for_project(
        context,
        project_id,
        volume_type_id=volume_type_id,
    )
    if CONF.no_snapshot_gb_quota:
        # Snapshot space doesn't count against the gigabytes quota.
        return {key: vol_gigs}
    _count, snap_gigs = _snapshot_data_get_for_project(
        context,
        project_id,
        volume_type_id=volume_type_id,
    )
    return {key: vol_gigs + snap_gigs}
def _sync_backup_gigabytes(
    context,
    project_id,
    volume_type_id=None,
    volume_type_name=None,
):
    """Recount gigabytes consumed by a project's backups."""
    _count, backup_gigs = _backup_data_get_for_project(
        context,
        project_id,
        volume_type_id=volume_type_id,
    )
    return {'backup_gigabytes': backup_gigs}
def _sync_groups(
    context,
    project_id,
    volume_type_id=None,
    volume_type_name=None,
):
    """Recount a project's groups for quota sync."""
    _gigs, group_count = _group_data_get_for_project(context, project_id)
    return {'groups': group_count}
# Dispatch table from quota sync-function name to its implementation above.
QUOTA_SYNC_FUNCTIONS = {
    '_sync_volumes': _sync_volumes,
    '_sync_snapshots': _sync_snapshots,
    '_sync_gigabytes': _sync_gigabytes,
    '_sync_backups': _sync_backups,
    '_sync_backup_gigabytes': _sync_backup_gigabytes,
    '_sync_groups': _sync_groups,
}
###################
def _clean_filters(filters):
return {k: v for k, v in filters.items() if v is not None}
def _filter_host(field, value, match_level=None):
    """Generate a filter condition for host and cluster fields.

    Levels are:

    - 'pool': Will search for an exact match
    - 'backend': Will search for exact match and value#*
    - 'host'; Will search for exact match, value@* and value#*

    If no level is provided we'll determine it based on the value we want to
    match:

    - 'pool': If '#' is present in value
    - 'backend': If '@' is present in value and '#' is not present
    - 'host': In any other case

    :param field: ORM field.  Ex: objects.Volume.model.host
    :param value: String to compare with
    :param match_level: 'pool', 'backend', or 'host'
    """
    if match_level is None:
        # LIKE operations are expensive, so infer the narrowest level that
        # can match the given value.
        if '#' in value:
            match_level = 'pool'
        elif '@' in value:
            match_level = 'backend'
        else:
            match_level = 'host'
    # Mysql is not doing case sensitive filtering, so we force it
    conn_str = CONF.database.connection
    if conn_str.startswith('mysql') and conn_str[5] in ['+', ':']:
        cmp_value = func.binary(value)
        like_op = 'LIKE BINARY'
    else:
        cmp_value = value
        like_op = 'LIKE'
    conditions = [field == cmp_value]
    if match_level != 'pool':
        conditions.append(field.op(like_op)(value + '#%'))
        if match_level == 'host':
            conditions.append(field.op(like_op)(value + '@%'))
    return or_(*conditions)
def _filter_time_comparison(field, time_filter_dict):
    """Generate a filter condition for time comparison operators.

    :param field: ORM datetime field to compare.
    :param time_filter_dict: mapping of comparison name ('gt', 'gte',
        'eq', 'neq', 'lt', 'lte') to a datetime value; unknown names are
        silently ignored (matching the previous behavior).
    :returns: OR of all requested comparisons.
    """
    # Dispatch table replaces the previous fragile mixed if/elif chain.
    sql_ops = {
        'gt': '>',
        'gte': '>=',
        'eq': '=',
        'neq': '!=',
        'lt': '<',
        'lte': '<=',
    }
    conditions = []
    for operator, filter_value in time_filter_dict.items():
        sql_op = sql_ops.get(operator)
        if sql_op is None:
            continue
        filter_time = timeutils.normalize_time(filter_value)
        conditions.append(field.op(sql_op)(filter_time))
    return or_(*conditions)
def _clustered_bool_field_filter(query, field_name, filter_value):
    """Filter services by a boolean flag that may come from their cluster.

    :param query: Service query to filter.
    :param field_name: name of the boolean column; callers pass 'disabled'
        or 'frozen'.
    :param filter_value: True/False to filter on; None disables filtering.
    :returns: the (possibly) filtered query.
    """
    # Now that we have clusters, a service is disabled/frozen if the service
    # doesn't belong to a cluster or if it belongs to a cluster and the
    # cluster itself is disabled/frozen.
    if filter_value is not None:
        query_filter = or_(
            # Clusterless service: the flag on the service row decides.
            and_(
                models.Service.cluster_name.is_(None),
                getattr(models.Service, field_name),
            ),
            # Clustered service: the flag on its non-deleted cluster decides.
            and_(
                models.Service.cluster_name.isnot(None),
                sql.exists().where(
                    and_(
                        models.Cluster.name == models.Service.cluster_name,
                        models.Cluster.binary == models.Service.binary,
                        ~models.Cluster.deleted,
                        getattr(models.Cluster, field_name),
                    )
                ),
            ),
        )
        if not filter_value:
            # Filtering for False means "not (disabled/frozen)".
            query_filter = ~query_filter  # pylint: disable=E1130
        query = query.filter(query_filter)
    return query
def _service_query(
    context,
    read_deleted='no',
    host=None,
    cluster_name=None,
    is_up=None,
    host_or_cluster=None,
    backend_match_level=None,
    disabled=None,
    frozen=None,
    **filters,
):
    """Build the base query used by the service_* DB API methods.

    :returns: A Service query with all requested filters applied, or None
              when ``filters`` contains keys that are not valid Service
              model fields.
    """
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(models.Service, filters):
        return None
    query = model_query(context, models.Service, read_deleted=read_deleted)
    # Host and cluster are particular cases of filters, because we must
    # retrieve not only exact matches (single backend configuration), but also
    # match those that have the backend defined (multi backend configuration).
    if host:
        query = query.filter(
            _filter_host(models.Service.host, host, backend_match_level)
        )
    if cluster_name:
        query = query.filter(
            _filter_host(
                models.Service.cluster_name, cluster_name, backend_match_level
            )
        )
    if host_or_cluster:
        query = query.filter(
            or_(
                _filter_host(
                    models.Service.host, host_or_cluster, backend_match_level
                ),
                _filter_host(
                    models.Service.cluster_name,
                    host_or_cluster,
                    backend_match_level,
                ),
            )
        )
    # disabled/frozen take the service's cluster into account as well.
    query = _clustered_bool_field_filter(query, 'disabled', disabled)
    query = _clustered_bool_field_filter(query, 'frozen', frozen)
    if filters:
        query = query.filter_by(**filters)
    if is_up is not None:
        # A service counts as up when its created_at or updated_at is newer
        # than the expiration limit; comparing the condition to is_up also
        # supports selecting services that are down (is_up=False).
        date_limit = utils.service_expired_time()
        svc = models.Service
        filter_ = or_(
            and_(svc.created_at.isnot(None), svc.created_at >= date_limit),
            and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit),
        )
        query = query.filter(filter_ == is_up)
    return query
@require_admin_context
@main_context_manager.writer
def service_destroy(context, service_id):
    """Soft-delete the service with the given id.

    :raises ServiceNotFound: if no service matches ``service_id``.
    :returns: the dict of values the row was updated with.
    """
    delete_values = models.Service.delete_values()
    updated = _service_query(context, id=service_id).update(delete_values)
    if not updated:
        raise exception.ServiceNotFound(service_id=service_id)
    return delete_values
@require_admin_context
@main_context_manager.reader
def service_get(context, service_id=None, backend_match_level=None, **filters):
    """Get a service that matches the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param service_id: Id of the service.
    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    :raise ServiceNotFound: If service doesn't exist.
    """
    query = _service_query(
        context,
        backend_match_level=backend_match_level,
        id=service_id,
        **filters,
    )
    service = query.first() if query else None
    if service:
        return service
    # Build the most descriptive identifier we have for the error message.
    fallback_id = service_id or filters.get('topic') or filters.get('binary')
    raise exception.ServiceNotFound(
        service_id=fallback_id, host=filters.get('host')
    )
@require_admin_context
@main_context_manager.reader
def service_get_all(context, backend_match_level=None, **filters):
    """Get all services that match the criteria.

    A possible filter is is_up=True and it will filter nodes that are down.

    :param filters: Filters for the query in the form of key/value.
    :param backend_match_level: 'pool', 'backend', or 'host' for host and
                                cluster filters (as defined in _filter_host
                                method)
    """
    query = _service_query(
        context, backend_match_level=backend_match_level, **filters
    )
    if not query:
        # Invalid filter keys were requested; nothing can match.
        return []
    return query.all()
@require_admin_context
@main_context_manager.reader
def service_get_by_uuid(context, service_uuid):
    """Return the service with the given UUID.

    :raises ServiceNotFound: if no service has that UUID.
    """
    service = (
        model_query(context, models.Service)
        .filter_by(uuid=service_uuid)
        .first()
    )
    if service is None:
        raise exception.ServiceNotFound(service_id=service_uuid)
    return service
@require_admin_context
@main_context_manager.writer
def service_create(context, values):
    """Create a new service row from ``values`` and return it.

    Honors the enable_new_services config option: when it is False, the
    new service is created disabled.
    """
    service = models.Service()
    service.update(values)
    if not CONF.enable_new_services:
        service.disabled = True
    service.save(context.session)
    return service
@require_admin_context
@main_context_manager.writer
def service_update(context, service_id, values, retry=True):
    """Update a service row with ``values``.

    :param retry: When True, wrap the update with a deadlock-retrying
                  decorator (up to 5 attempts).
    :raises ServiceNotFound: if no service matches ``service_id``.
    """
    def _service_update(context, service_id, values):
        query = _service_query(context, id=service_id)
        if 'disabled' in values:
            entity = query.column_descriptions[0]['entity']
            values = values.copy()
            # Record the change in modified_at, but keep updated_at at its
            # current value unless the caller supplied one — updated_at is
            # what _service_query's is_up filter checks, so bumping it here
            # would make a disable/enable look like a heartbeat.
            values['modified_at'] = values.get('modified_at',
                                               timeutils.utcnow())
            values['updated_at'] = values.get('updated_at',
                                              entity.updated_at)
        result = query.update(values)
        if not result:
            raise exception.ServiceNotFound(service_id=service_id)
    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
    def _service_update_retry(context, service_id, values):
        _service_update(context, service_id, values)
    if retry:
        _service_update_retry(context, service_id, values)
    else:
        _service_update(context, service_id, values)
###################
@require_admin_context
@main_context_manager.reader
def is_backend_frozen(context, host, cluster_name):
    """Check if a storage backend is frozen based on host and cluster_name."""
    if cluster_name:
        model = models.Cluster
        match = model.name == volume_utils.extract_host(cluster_name)
    else:
        model = models.Service
        match = model.host == volume_utils.extract_host(host)
    # EXISTS query: any non-deleted row of the chosen model that matches
    # and has its frozen flag set.
    exists_query = sql.exists().where(
        and_(match, ~model.deleted, model.frozen)
    )
    return context.session.query(exists_query).scalar()
###################
def _cluster_query(
    context,
    is_up=None,
    get_services=False,
    services_summary=False,
    read_deleted='no',
    name_match_level=None,
    name=None,
    **filters,
):
    """Build the base query used by the cluster_* DB API methods.

    :returns: A Cluster query with all requested filters/options applied,
              or None when ``filters`` contains keys that are not valid
              Cluster model fields.
    """
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(models.Cluster, filters):
        return None
    query = model_query(context, models.Cluster, read_deleted=read_deleted)
    # Cluster is a special case of filter, because we must match exact match
    # as well as hosts that specify the backend
    if name:
        query = query.filter(
            _filter_host(models.Cluster.name, name, name_match_level)
        )
    if filters:
        query = query.filter_by(**filters)
    if services_summary:
        query = query.options(undefer_group('services_summary'))
        # We bind the expiration time to now (as it changes with each query)
        # and is required by num_down_hosts
        query = query.params(expired=utils.service_expired_time())
    elif 'num_down_hosts' in filters:
        # num_down_hosts needs the same bound parameter even without the
        # services summary columns.
        query = query.params(expired=utils.service_expired_time())
    if get_services:
        query = query.options(joinedload(models.Cluster.services))
    if is_up is not None:
        # A cluster counts as up when its last_heartbeat is recent enough;
        # comparing the condition to is_up also supports selecting clusters
        # that are down (is_up=False).
        date_limit = utils.service_expired_time()
        filter_ = and_(
            models.Cluster.last_heartbeat.isnot(None),
            models.Cluster.last_heartbeat >= date_limit,
        )
        query = query.filter(filter_ == is_up)
    return query
@require_admin_context
@main_context_manager.reader
def cluster_get(
    context,
    id=None,
    is_up=None,
    get_services=False,
    services_summary=False,
    read_deleted='no',
    name_match_level=None,
    **filters,
):
    """Get a cluster that matches the criteria.

    :param id: Id of the cluster.
    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param filters: Field based filters in the form of key/value.
    :param name_match_level: 'pool', 'backend', or 'host' for name filter
                             (as defined in _filter_host method)
    :raise ClusterNotFound: If cluster doesn't exist.
    """
    query = _cluster_query(
        context,
        is_up,
        get_services,
        services_summary,
        read_deleted,
        name_match_level,
        id=id,
        **filters,
    )
    cluster = query.first() if query else None
    if cluster is None:
        # Use the id when we have one; otherwise describe the filters used.
        raise exception.ClusterNotFound(id=id or str(filters))
    return cluster
@require_admin_context
@main_context_manager.reader
def cluster_get_all(
    context,
    is_up=None,
    get_services=False,
    services_summary=False,
    read_deleted='no',
    name_match_level=None,
    **filters,
):
    """Get all clusters that match the criteria.

    :param is_up: Boolean value to filter based on the cluster's up status.
    :param get_services: If we want to load all services from this cluster.
    :param services_summary: If we want to load num_hosts and
                             num_down_hosts fields.
    :param read_deleted: Filtering based on delete status. Default value is
                         "no".
    :param name_match_level: 'pool', 'backend', or 'host' for name filter
                             (as defined in _filter_host method)
    :param filters: Field based filters in the form of key/value.
    """
    query = _cluster_query(
        context,
        is_up,
        get_services,
        services_summary,
        read_deleted,
        name_match_level,
        **filters,
    )
    if query is None:
        # Invalid filter keys were requested; nothing can match.
        return []
    return query.all()
@require_admin_context
@main_context_manager.writer
def cluster_create(context, values):
    """Create a cluster from the values dictionary."""
    cluster = models.Cluster()
    cluster.update(values)
    # An explicitly provided disabled value takes precedence over the
    # enable_new_services configuration option.
    if values.get('disabled') is None:
        cluster.disabled = not CONF.enable_new_services
    try:
        cluster.save(context.session)
    except db_exc.DBDuplicateEntry:
        # Race: another non-deleted cluster with the same name exists.
        raise exception.ClusterExists(name=values.get('name'))
    # We mark that the newly created cluster has no hosts to prevent
    # problems at the OVO level.
    cluster.last_heartbeat = None
    return cluster
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def cluster_update(context, cluster_id, values):
    """Set the given properties on a cluster and update it.

    :raises ClusterNotFound: if the cluster does not exist.
    """
    updated = _cluster_query(context, id=cluster_id).update(values)
    if not updated:
        raise exception.ClusterNotFound(id=cluster_id)
@require_admin_context
@main_context_manager.writer
def cluster_destroy(context, cluster_id):
    """Destroy the cluster or raise if it does not exist or has hosts."""
    query = _cluster_query(context, id=cluster_id).filter(
        models.Cluster.num_hosts == 0
    )
    result = query.update(
        models.Cluster.delete_values(), synchronize_session=False
    )
    if result:
        return
    # The update failing doesn't tell us whether the cluster is missing or
    # simply still has hosts.  cluster_get raises ClusterNotFound when it
    # doesn't exist ...
    cluster_get(context, id=cluster_id)
    # ... so if we get here it exists and the problem is that it has hosts.
    raise exception.ClusterHasHosts(id=cluster_id)
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _dict_with_extra_specs_if_authorized(context, inst_type_query):
"""Convert type query result to dict with extra_spec and rate_limit.
Takes a volume type query returned by sqlalchemy and returns it
as a dictionary, converting the extra_specs entry from a list
of dicts. NOTE the contents of extra-specs are admin readable
only. If the context passed in for this request is not admin
then we will return an empty extra-specs dict rather than
providing the admin only details.
Example response with admin context:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = {
x['key']: x['value'] for x in inst_type_query['extra_specs']
}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
###################
def _dict_with_group_specs_if_authorized(context, inst_type_query):
    """Convert a group type query result to a dict with flat group_specs.

    The group_specs entry, which SQLAlchemy returns as a list of dicts
    ('group_specs': [{'key': 'k1', 'value': 'v1', ...}, ...]), is flattened
    into a single {'k1': 'v1'} dict.  Group specs are admin readable only:
    for a non-admin context the group_specs entry is removed entirely.
    """
    type_dict = dict(inst_type_query)
    if is_admin_context(context):
        type_dict['group_specs'] = {
            spec['key']: spec['value']
            for spec in inst_type_query['group_specs']
        }
    else:
        del type_dict['group_specs']
    return type_dict
###################
def _quota_get(context, project_id, resource):
    """Return the non-deleted quota row for a project/resource pair.

    :raises ProjectQuotaNotFound: if no such quota exists.
    """
    quota = (
        model_query(context, models.Quota, read_deleted="no")
        .filter_by(project_id=project_id, resource=resource)
        .first()
    )
    if quota is None:
        raise exception.ProjectQuotaNotFound(project_id=project_id)
    return quota
@require_context
@main_context_manager.reader
def quota_get(context, project_id, resource):
    # Public wrapper that runs _quota_get inside a reader transaction
    # context; raises ProjectQuotaNotFound when the row doesn't exist.
    return _quota_get(context, project_id, resource)
@require_context
@main_context_manager.reader
def quota_get_all_by_project(context, project_id):
    """Return all quota limits for a project as a resource->limit dict.

    The result always includes a 'project_id' entry.
    """
    rows = (
        model_query(context, models.Quota, read_deleted="no")
        .filter_by(project_id=project_id)
        .all()
    )
    quotas = {'project_id': project_id}
    quotas.update((row.resource, row.hard_limit) for row in rows)
    return quotas
@require_context
@main_context_manager.writer
def quota_create(context, project_id, resource, limit):
    """Create and persist a quota limit for a project resource."""
    quota = models.Quota()
    quota.update({
        'project_id': project_id,
        'resource': resource,
        'hard_limit': limit,
    })
    quota.save(context.session)
    return quota
@require_context
@main_context_manager.writer
def quota_update(context, project_id, resource, limit):
    """Set a new hard limit on an existing project quota and return it.

    :raises ProjectQuotaNotFound: if the quota row does not exist.
    """
    quota = _quota_get(context, project_id, resource)
    quota.hard_limit = limit
    quota.save(context.session)
    return quota
@require_context
@main_context_manager.writer
def quota_update_resource(context, old_res, new_res):
    """Rename the resource on all non-deleted quota rows.

    :param old_res: Current resource name to look for.
    :param new_res: New resource name to set.
    :returns: The last quota row updated, or None when no row matched
              ``old_res``.
    """
    quotas = (
        model_query(context, models.Quota, read_deleted='no')
        .filter_by(resource=old_res)
        .all()
    )
    # Initialize so an empty result doesn't raise UnboundLocalError on the
    # return below (the original returned the bare loop variable).
    quota = None
    for quota in quotas:
        quota.resource = new_res
        quota.save(context.session)
    return quota
@require_admin_context
@main_context_manager.writer
def quota_destroy(context, project_id, resource):
    # Soft-delete the quota limit row for the project/resource pair;
    # _quota_get raises ProjectQuotaNotFound when the row doesn't exist.
    quota_ref = _quota_get(context, project_id, resource)
    return quota_ref.delete(context.session)
###################
def _quota_class_get(context, class_name, resource):
    """Return the non-deleted quota class row for class_name/resource.

    :raises QuotaClassNotFound: if no such row exists.
    """
    quota_class = (
        model_query(context, models.QuotaClass, read_deleted="no")
        .filter_by(class_name=class_name, resource=resource)
        .first()
    )
    if quota_class is None:
        raise exception.QuotaClassNotFound(class_name=class_name)
    return quota_class
@require_context
@main_context_manager.reader
def quota_class_get(context, class_name, resource):
    # Public wrapper that runs _quota_class_get inside a reader transaction
    # context; raises QuotaClassNotFound when the row doesn't exist.
    return _quota_class_get(context, class_name, resource)
@require_context
@main_context_manager.reader
def quota_class_get_defaults(context):
    """Return the default quota class limits as a resource->limit dict.

    The result always includes a 'class_name' entry set to the default
    quota class name.
    """
    rows = (
        model_query(context, models.QuotaClass, read_deleted="no")
        .filter_by(class_name=_DEFAULT_QUOTA_NAME)
        .all()
    )
    defaults = {'class_name': _DEFAULT_QUOTA_NAME}
    defaults.update((row.resource, row.hard_limit) for row in rows)
    return defaults
@require_context
@main_context_manager.reader
def quota_class_get_all_by_name(context, class_name):
    """Return all limits of a quota class as a resource->limit dict.

    The result always includes a 'class_name' entry.
    """
    rows = (
        model_query(context, models.QuotaClass, read_deleted="no")
        .filter_by(class_name=class_name)
        .all()
    )
    limits = {'class_name': class_name}
    limits.update((row.resource, row.hard_limit) for row in rows)
    return limits
@require_context
@main_context_manager.reader
def _quota_class_get_all_by_resource(context, resource):
    # Return all non-deleted quota class rows for the given resource,
    # across every quota class.
    # NOTE(review): this reader-decorated helper is called from
    # quota_class_update_resource, which runs inside a writer transaction
    # -- confirm that this enginefacade reader/writer nesting is intended.
    result = (
        model_query(context, models.QuotaClass, read_deleted="no")
        .filter_by(resource=resource)
        .all()
    )
    return result
@handle_db_data_error
@require_context
@main_context_manager.writer
def quota_class_create(context, class_name, resource, limit):
    """Create and persist a quota class limit."""
    quota_class = models.QuotaClass()
    quota_class.update({
        'class_name': class_name,
        'resource': resource,
        'hard_limit': limit,
    })
    quota_class.save(context.session)
    return quota_class
@require_context
@main_context_manager.writer
def quota_class_update(context, class_name, resource, limit):
    """Set a new hard limit on an existing quota class entry.

    :raises QuotaClassNotFound: if the entry does not exist.
    """
    quota_class = _quota_class_get(context, class_name, resource)
    quota_class.hard_limit = limit
    quota_class.save(context.session)
    return quota_class
@require_context
@main_context_manager.writer
def quota_class_update_resource(context, old_res, new_res):
    """Rename the resource on every quota class entry using ``old_res``."""
    for quota_class in _quota_class_get_all_by_resource(context, old_res):
        quota_class.resource = new_res
        quota_class.save(context.session)
@require_context
@main_context_manager.writer
def quota_class_destroy(context, class_name, resource):
    # Soft-delete the quota class row; _quota_class_get raises
    # QuotaClassNotFound when the row doesn't exist.
    quota_class_ref = _quota_class_get(context, class_name, resource)
    return quota_class_ref.delete(context.session)
@require_context
@main_context_manager.writer
def quota_class_destroy_all_by_name(context, class_name):
    """Soft-delete every quota class row belonging to ``class_name``."""
    rows = (
        model_query(context, models.QuotaClass, read_deleted="no")
        .filter_by(class_name=class_name)
        .all()
    )
    for row in rows:
        row.delete(context.session)
###################
@require_context
@main_context_manager.reader
def quota_usage_get(context, project_id, resource):
    """Return the usage row for a project/resource pair.

    :raises QuotaUsageNotFound: if no usage row exists.
    """
    usage = (
        model_query(context, models.QuotaUsage, read_deleted="no")
        .filter_by(project_id=project_id, resource=resource)
        .first()
    )
    if usage is None:
        raise exception.QuotaUsageNotFound(project_id=project_id)
    return usage
@require_context
@main_context_manager.reader
def quota_usage_get_all_by_project(context, project_id):
    """Return per-resource usage for a project.

    The result maps each resource name to a dict with 'in_use' and
    'reserved' counts, plus a 'project_id' entry.
    """
    rows = (
        model_query(context, models.QuotaUsage, read_deleted="no")
        .filter_by(project_id=project_id)
        .all()
    )
    usages = {'project_id': project_id}
    for row in rows:
        usages[row.resource] = {'in_use': row.in_use,
                                'reserved': row.reserved}
    return usages
def _quota_usage_create(
    context,
    project_id,
    resource,
    in_use,
    reserved,
    until_refresh,
):
    """Create and persist a quota usage row with the given counters."""
    usage = models.QuotaUsage()
    usage.update({
        'project_id': project_id,
        'resource': resource,
        'in_use': in_use,
        'reserved': reserved,
        'until_refresh': until_refresh,
    })
    usage.save(context.session)
    return usage
def _reservation_create(
    context,
    uuid,
    usage,
    project_id,
    resource,
    delta,
    expire,
):
    """Create and persist a reservation row.

    :param usage: QuotaUsage row the reservation counts against, or None
                  (in which case usage_id is stored as None).
    """
    reservation = models.Reservation()
    reservation.update({
        'uuid': uuid,
        'usage_id': usage['id'] if usage else None,
        'project_id': project_id,
        'resource': resource,
        'delta': delta,
        'expire': expire,
    })
    reservation.save(context.session)
    return reservation
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_quota_usages(context, project_id, resources=None):
    """Lock and return a project's usage rows, keyed by resource name.

    Rows are locked with SELECT ... FOR UPDATE, ordered by id so lock
    acquisition order is deterministic.  Broken out for testability.
    """
    query = model_query(
        context, models.QuotaUsage, read_deleted="no"
    ).filter_by(project_id=project_id)
    if resources:
        query = query.filter(models.QuotaUsage.resource.in_(list(resources)))
    locked_rows = (
        query.order_by(models.QuotaUsage.id.asc()).with_for_update().all()
    )
    return {usage.resource: usage for usage in locked_rows}
def _get_quota_usages_by_resource(context, resource):
    """Lock and return all usage rows for a given resource.

    Rows are locked with SELECT ... FOR UPDATE, ordered by id so lock
    acquisition order is deterministic.
    """
    # NOTE(review): every other query in this module passes read_deleted=
    # to model_query; the previous deleted="no" keyword appears to be a
    # typo that silently dropped the intended skipping of soft-deleted
    # rows.
    rows = (
        model_query(context, models.QuotaUsage, read_deleted="no")
        .filter_by(resource=resource)
        .order_by(models.QuotaUsage.id.asc())
        .with_for_update()
        .all()
    )
    return rows
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def quota_usage_update_resource(context, old_res, new_res):
    """Rename a resource on its usage rows and schedule a usage refresh."""
    for usage in _get_quota_usages_by_resource(context, old_res):
        usage.resource = new_res
        # until_refresh=1 makes the next quota_reserve pass refresh the
        # counters from the sync function.
        usage.until_refresh = 1
def _get_sync_updates(ctxt, project_id, resources, resource_name):
    """Return usage for a specific resource.

    Resources are volumes, gigabytes, backups, snapshots, and also the
    volumes_/snapshots_ prefixed entries for each volume type.
    """
    res = resources[resource_name]
    # Grab the registered sync routine for this resource.
    sync = QUOTA_SYNC_FUNCTIONS[res.sync]
    # VolumeTypeResource instances also carry the type's id and name;
    # plain resources don't, so default both to None.
    return sync(
        ctxt,
        project_id,
        volume_type_id=getattr(res, 'volume_type_id', None),
        volume_type_name=getattr(res, 'volume_type_name', None),
    )
def _is_duplicate(exc):
    """Check if an exception is caused by a unique constraint failure."""
    # Used as wrap_db_retry's exception_checker on quota_reserve so that
    # DBDuplicateEntry races during usage-row creation are retried.
    return isinstance(exc, db_exc.DBDuplicateEntry)
@require_context
@oslo_db_api.wrap_db_retry(
    max_retries=5, retry_on_deadlock=True, exception_checker=_is_duplicate
)
@main_context_manager.writer
def quota_reserve(
    context,
    resources,
    quotas,
    deltas,
    expire,
    until_refresh,
    max_age,
    project_id=None,
):
    """Check quotas and create reservation rows for the given deltas.

    Locks the project's quota_usages rows (creating and syncing any that
    are missing), refreshes stale usage counts, verifies positive deltas
    fit within ``quotas``, and records one reservation row per resource.

    :param resources: Mapping of resource name to resource definition
                      (used to look up the sync function).
    :param quotas: Mapping of resource name to hard limit; a negative
                   limit is never counted as over quota.
    :param deltas: Mapping of resource name to the requested change.
    :param expire: Expiration time stored on each reservation.
    :param until_refresh: Reservation countdown before usage is refreshed
                          from the sync function (falsy disables it).
    :param max_age: Refresh usages whose updated_at is older than this
                    many seconds.
    :returns: List of reservation UUIDs.
    :raises OverQuota: when any positive delta would exceed its quota.
    """
    elevated = context.elevated()
    if project_id is None:
        project_id = context.project_id
    # Loop until we can lock all the resource rows we'll be modifying
    while True:
        # Get the current usages and lock existing rows
        usages = _get_quota_usages(
            context, project_id, resources=deltas.keys()
        )
        missing = [res for res in deltas if res not in usages]
        # If we have successfully locked all the rows we can continue.
        # SELECT ... FOR UPDATE used in _get_quota usages cannot lock
        # non-existing rows, so there can be races with other requests
        # trying to create those rows.
        if not missing:
            break
        # Create missing rows calculating current values instead of
        # assuming there are no used resources as admins may have been
        # using this mechanism to force quota usage refresh.
        for resource in missing:
            updates = _get_sync_updates(
                elevated,
                project_id,
                resources,
                resource,
            )
            _quota_usage_create(
                elevated,
                project_id,
                resource,
                updates[resource],
                0,
                until_refresh or None,
            )
        # NOTE: When doing the commit there can be a race condition with
        # other service instances or thread that are also creating the
        # same rows and in that case this will raise either a Deadlock
        # exception (when multiple transactions were creating the same rows
        # and the DB failed to acquire the row lock on the non-first
        # transaction) or a DBDuplicateEntry exception if some other
        # transaction created the row between us doing the
        # _get_quota_usages and here. In both cases this transaction will
        # be rolled back and the wrap_db_retry decorator will retry.
        # Commit new rows to the DB.
        context.session.commit()
        # Start a new session before trying to lock all the rows again. By
        # trying to get all the locks in a loop we can protect us against
        # admins directly deleting DB rows.
        context.session.begin()
    # Handle usage refresh
    for resource in deltas.keys():
        # Do we need to refresh the usage?
        refresh = False
        if usages[resource].in_use < 0:
            # If we created the entry right now we want to refresh.
            # Negative in_use count indicates a desync, so try to
            # heal from that...
            refresh = True
        elif usages[resource].until_refresh is not None:
            usages[resource].until_refresh -= 1
            if usages[resource].until_refresh <= 0:
                refresh = True
        elif (
            max_age
            and usages[resource].updated_at is not None
            and (
                (
                    timeutils.utcnow() - usages[resource].updated_at
                ).total_seconds()
                >= max_age
            )
        ):
            refresh = True
        # OK, refresh the usage
        if refresh:
            updates = _get_sync_updates(
                elevated,
                project_id,
                resources,
                resource,
            )
            # Updates will always contain a single resource usage matching
            # the resource variable.
            usages[resource].in_use = updates[resource]
            usages[resource].until_refresh = until_refresh or None
        # There are 3 cases where we want to update "until_refresh" in the
        # DB: when we enabled it, when we disabled it, and when we changed
        # to a value lower than the current remaining value.
        else:
            res_until = usages[resource].until_refresh
            if (res_until is None and until_refresh) or (
                (res_until or 0) > (until_refresh or 0)
            ):
                usages[resource].until_refresh = until_refresh or None
    # Check for deltas that would go negative
    unders = [
        r
        for r, delta in deltas.items()
        if delta < 0 and delta + usages[r].in_use < 0
    ]
    # TODO(mc_nair): Should ignore/zero alloc if using non-nested driver
    # Now, let's check the quotas
    # NOTE(Vek): We're only concerned about positive increments.
    #            If a project has gone over quota, we want them to
    #            be able to reduce their usage without any
    #            problems.
    overs = [
        r
        for r, delta in deltas.items()
        if quotas[r] >= 0
        and delta >= 0
        and quotas[r] < delta + usages[r].total
    ]
    # NOTE(Vek): The quota check needs to be in the transaction,
    #            but the transaction doesn't fail just because
    #            we're over quota, so the OverQuota raise is
    #            outside the transaction. If we did the raise
    #            here, our usage updates would be discarded, but
    #            they're not invalidated by being over-quota.
    # Create the reservations
    if not overs:
        reservations = []
        for resource, delta in deltas.items():
            usage = usages[resource]
            reservation = _reservation_create(
                elevated,
                str(uuid.uuid4()),
                usage,
                project_id,
                resource,
                delta,
                expire,
            )
            reservations.append(reservation.uuid)
            # Also update the reserved quantity
            # NOTE(Vek): Again, we are only concerned here about
            #            positive increments. Here, though, we're
            #            worried about the following scenario:
            #
            #            1) User initiates resize down.
            #            2) User allocates a new instance.
            #            3) Resize down fails or is reverted.
            #            4) User is now over quota.
            #
            #            To prevent this, we only update the
            #            reserved value if the delta is positive.
            if delta > 0:
                usages[resource].reserved += delta
    if unders:
        LOG.warning(
            "Reservation would make usage less than 0 for the "
            "following resources, so on commit they will be "
            "limited to prevent going below 0: %s",
            unders,
        )
    if overs:
        usages = {
            k: dict(in_use=v.in_use, reserved=v.reserved)
            for k, v in usages.items()
        }
        raise exception.OverQuota(
            overs=sorted(overs), quotas=quotas, usages=usages
        )
    return reservations
def _quota_reservations(context, reservations):
    """Return the listed reservation rows, locked FOR UPDATE."""
    query = model_query(context, models.Reservation, read_deleted="no")
    return (
        query.filter(models.Reservation.uuid.in_(reservations))
        .with_for_update()
        .all()
    )
def _get_reservation_resources(context, reservation_ids):
    """Return the set of resource names the given reservations refer to."""
    query = (
        model_query(context, models.Reservation, read_deleted="no")
        .options(load_only(models.Reservation.resource))
        .filter(models.Reservation.uuid.in_(reservation_ids))
    )
    return {reservation.resource for reservation in query.all()}
def _dict_with_usage_id(usages):
return {row.id: row for row in usages.values()}
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_commit(context, reservations, project_id=None):
    """Apply the given reservations to the usage counters and delete them.

    For each reservation the reserved amount is released and its delta is
    applied to in_use (clamped so in_use never goes negative).
    """
    # NOTE: There's a potential race condition window with
    # reservation_expire, since _get_reservation_resources does not lock
    # the rows, but we won't fix it because:
    # - Minuscule chance of happening, since quota expiration is usually
    #   very high
    # - Solution could create a DB lock on rolling upgrades since we need
    #   to reverse the order of locking the rows.
    usages = _get_quota_usages(
        context,
        project_id,
        resources=_get_reservation_resources(context, reservations),
    )
    usages = _dict_with_usage_id(usages)
    for reservation in _quota_reservations(context, reservations):
        usage = usages[reservation.usage_id]
        delta = reservation.delta
        if delta >= 0:
            # Release the reserved amount, never letting it go below zero.
            usage.reserved -= min(delta, usage.reserved)
        # For negative deltas make sure we never go into negative usage
        elif -delta > usage.in_use:
            delta = -usage.in_use
        usage.in_use += delta
        reservation.delete(context.session)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_rollback(context, reservations, project_id=None):
    """Discard the given reservations without applying their deltas.

    Only the reserved counter is released (and only for positive deltas,
    which are the only ones quota_reserve added to it); in_use is left
    untouched.
    """
    # NOTE: There's a potential race condition window with
    # reservation_expire, since _get_reservation_resources does not lock
    # the rows, but we won't fix it because:
    # - Minuscule chance of happening, since quota expiration is usually
    #   very high
    # - Solution could create a DB lock on rolling upgrades since we need
    #   to reverse the order of locking the rows.
    usages = _get_quota_usages(
        context,
        project_id,
        resources=_get_reservation_resources(context, reservations),
    )
    usages = _dict_with_usage_id(usages)
    for reservation in _quota_reservations(context, reservations):
        usage = usages[reservation.usage_id]
        if reservation.delta >= 0:
            # Never let reserved go below zero.
            usage.reserved -= min(reservation.delta, usage.reserved)
        reservation.delete(context.session)
@require_context
def quota_destroy_by_project(context, project_id):
    """Destroy all limit quotas associated with a project.

    Leaves usage and reservation quotas intact.
    """
    # Delegates with only_quotas=True so only limit rows are removed.
    quota_destroy_all_by_project(context, project_id, only_quotas=True)
# TODO(stephenfin): No one is using this except 'quota_destroy_by_project'
# above, so the only_quotas=False path could be removed.
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def quota_destroy_all_by_project(context, project_id, only_quotas=False):
    """Destroy all quotas associated with a project.

    This includes limit quotas, usage quotas and reservation quotas.
    Optionally can only remove limit quotas and leave other types as they are.

    :param context: The request context, for access checks.
    :param project_id: The ID of the project being deleted.
    :param only_quotas: Only delete limit quotas, leave other types intact.
    """
    # NOTE(review): unlike the two queries below, this one doesn't pass
    # read_deleted="no", so already-deleted limit rows get their delete
    # values re-applied -- confirm whether that's intentional.
    model_query(context, models.Quota).filter_by(project_id=project_id).update(
        models.Quota.delete_values()
    )
    if only_quotas:
        return
    model_query(context, models.QuotaUsage, read_deleted="no").filter_by(
        project_id=project_id
    ).update(models.QuotaUsage.delete_values())
    model_query(context, models.Reservation, read_deleted="no").filter_by(
        project_id=project_id
    ).update(models.Reservation.delete_values())
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_expire(context):
    """Delete expired reservations, releasing their reserved quota."""
    now = timeutils.utcnow()
    expired = (
        model_query(context, models.Reservation, read_deleted="no")
        .filter(models.Reservation.expire < now)
        .with_for_update()
        .all()
    )
    for reservation in expired:
        if reservation.delta >= 0:
            # Release the reserved amount, never letting it go below zero.
            reservation.usage.reserved -= min(
                reservation.delta,
                reservation.usage.reserved,
            )
            reservation.usage.save(context.session)
        reservation.delete(context.session)
###################
@require_admin_context
@main_context_manager.writer
def volume_attach(context, values):
    """Create a volume attachment record and return the stored row."""
    # Generate an id when the caller didn't supply a usable one.
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    attachment = models.VolumeAttachment()
    attachment.update(values)
    attachment.save(context.session)
    # Re-read the row so the caller gets it as _attachment_get loads it.
    return _attachment_get(context, values['id'])
@require_admin_context
@main_context_manager.writer
def volume_attached(
    context,
    attachment_id,
    instance_uuid,
    host_name,
    mountpoint,
    attach_mode='rw',
    mark_attached=True,
):
    """This method updates a volume attachment entry.

    This function saves the information related to a particular
    attachment for a volume. It also updates the volume record
    to mark the volume as attached or attaching.

    The mark_attached argument is a boolean, when set to True,
    we mark the volume as 'in-use' and the 'attachment' as
    'attached', if False, we use 'attaching' for both of these
    status settings.

    :raises InvalidUUID: if instance_uuid is set but not UUID-like.
    :returns: tuple of (volume row, dict of values applied to the
              attachment, minus the internal updated_at override).
    """
    attach_status = fields.VolumeAttachStatus.ATTACHED
    volume_status = 'in-use'
    if not mark_attached:
        attach_status = fields.VolumeAttachStatus.ATTACHING
        volume_status = 'attaching'
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)
    volume_attachment_ref = _attachment_get(context, attachment_id)
    updated_values = {
        'mountpoint': mountpoint,
        'attach_status': attach_status,
        'instance_uuid': instance_uuid,
        'attached_host': host_name,
        'attach_time': timeutils.utcnow(),
        'attach_mode': attach_mode,
        # NOTE(review): updated_at is pinned to its current value here,
        # presumably so this save doesn't bump the row's timestamp --
        # confirm against the model's onupdate behavior.
        'updated_at': volume_attachment_ref.updated_at,
    }
    volume_attachment_ref.update(updated_values)
    volume_attachment_ref.save(context.session)
    # Don't expose the internal updated_at override to the caller.
    del updated_values['updated_at']
    volume_ref = _volume_get(context, volume_attachment_ref['volume_id'])
    volume_ref['status'] = volume_status
    volume_ref['attach_status'] = attach_status
    volume_ref.save(context.session)
    return volume_ref, updated_values
@handle_db_data_error
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_create(context, values):
    """Create a volume row (plus its metadata rows) and return it."""
    # Generate an id when the caller didn't supply a usable one.
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    values['volume_metadata'] = _metadata_refs(
        values.get('metadata'),
        models.VolumeMetadata,
    )
    if is_admin_context(context):
        # Admin callers may also supply admin metadata.
        values['volume_admin_metadata'] = _metadata_refs(
            values.get('admin_metadata'),
            models.VolumeAdminMetadata,
        )
    elif values.get('volume_admin_metadata'):
        # Non-admin callers cannot set admin metadata directly.
        del values['volume_admin_metadata']
    volume = models.Volume()
    volume.update(values)
    context.session.add(volume)
    # Re-read the row so the caller gets it as _volume_get loads it.
    return _volume_get(context, values['id'])
def get_booleans_for_table(table_name):
    """Return the names of all Boolean columns of the named model.

    ``table_name`` is capitalized to locate the model class in ``models``;
    a model without a ``__table__`` attribute yields an empty set.
    """
    model = getattr(models, table_name.capitalize())
    if not hasattr(model, '__table__'):
        return set()
    return {
        column.name
        for column in model.__table__.columns
        if isinstance(column.type, sqltypes.Boolean)
    }
@require_admin_context
@main_context_manager.reader
def volume_data_get_for_host(context, host, count_only=False):
    """Return volume usage for a host, including its pools.

    Matches both exact 'host' records and pool-qualified 'host#pool'
    records.

    :returns: the volume count when ``count_only`` is True, otherwise a
        ``(count, total_size)`` tuple; missing values are reported as 0.
    """
    host_attr = models.Volume.host
    host_match = or_(host_attr == host, host_attr.op('LIKE')(host + '#%'))
    if count_only:
        row = (
            model_query(
                context, func.count(models.Volume.id), read_deleted="no"
            )
            .filter(host_match)
            .first()
        )
        return row[0] or 0
    row = (
        model_query(
            context,
            func.count(models.Volume.id),
            func.sum(models.Volume.size),
            read_deleted="no",
        )
        .filter(host_match)
        .first()
    )
    # NOTE(vish): convert None to 0
    return (row[0] or 0, row[1] or 0)
@require_admin_context
def _volume_data_get_for_project(
    context,
    project_id,
    volume_type_id=None,
    host=None,
    skip_internal=True,
):
    """Return ``(volume count, total size)`` for a project.

    :param volume_type_id: if set, only count volumes of this type
    :param host: if set, only count volumes on this host/backend
    :param skip_internal: when True, exclude rows whose ``use_quota``
        flag is unset (temporary/internal resources)
    """
    model = models.Volume
    query = model_query(
        context, func.count(model.id), func.sum(model.size), read_deleted="no"
    ).filter_by(project_id=project_id)
    # By default we skip temporary resources created for internal usage and
    # migration destination volumes.
    if skip_internal:
        query = query.filter(model.use_quota)
    if host:
        query = query.filter(_filter_host(model.host, host))
    if volume_type_id:
        query = query.filter_by(volume_type_id=volume_type_id)
    result = query.first()
    # NOTE(vish): convert None to 0
    return result[0] or 0, result[1] or 0
@require_admin_context
@main_context_manager.reader
def volume_data_get_for_project(context, project_id, host=None):
    """Return ``(volume count, total size)`` for a project.

    Unlike the internal helper's default, this includes temporary and
    internal volumes (``skip_internal=False``).
    """
    return _volume_data_get_for_project(
        context,
        project_id,
        host=host,
        skip_internal=False,
    )
# Models that reference a volume through a volume_id column and must be
# soft-deleted together with their parent volume (see volume_destroy).
VOLUME_DEPENDENT_MODELS = frozenset(
    [
        models.VolumeMetadata,
        models.VolumeAdminMetadata,
        models.Snapshot,
        models.Transfer,
        models.VolumeGlanceMetadata,
        models.VolumeAttachment,
    ]
)
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_destroy(context, volume_id):
    """Soft-delete a volume and all of its dependent records.

    Marks the volume row as deleted and cascades the soft delete to
    every model in VOLUME_DEPENDENT_MODELS referencing the volume.

    :returns: dict of values written to the volume row (minus
        ``updated_at``), useful for building notifications.
    """
    now = timeutils.utcnow()
    updated_values = {
        'status': 'deleted',
        'deleted': True,
        'deleted_at': now,
        'migration_status': None,
    }
    query = model_query(context, models.Volume).filter_by(id=volume_id)
    # NOTE(review): 'entity' is the mapped class; passing its updated_at
    # column expression through update() appears intended to keep
    # updated_at at its current value — confirm against model defaults.
    entity = query.column_descriptions[0]['entity']
    updated_values['updated_at'] = entity.updated_at
    query.update(updated_values)
    # Cascade the soft delete to all dependent records of this volume.
    for model in VOLUME_DEPENDENT_MODELS:
        query = model_query(context, model).filter_by(volume_id=volume_id)
        entity = query.column_descriptions[0]['entity']
        query.update(
            {
                'deleted': True,
                'deleted_at': now,
                'updated_at': entity.updated_at,
            }
        )
    del updated_values['updated_at']
    return updated_values
def _include_in_cluster(context, cluster, model, partial_rename, filters):
    """Generic include in cluster method.

    When we include resources in a cluster we have to be careful to preserve
    the addressing sections that have not been provided. That's why we allow
    partial_renaming, so we can preserve the backend and pool if we are only
    providing host/cluster level information, and preserve pool information if
    we only provide backend level information.

    For example when we include a host in a cluster we receive calls with
    filters like {'host': 'localhost@lvmdriver-1'} and cluster with something
    like 'mycluster@lvmdriver-1'. Since in the DB the resources will have the
    host field set to something like 'localhost@lvmdriver-1#lvmdriver-1' we
    want to include original pool in the new cluster_name. So we want to store
    in cluster_name value 'mycluster@lvmdriver-1#lvmdriver-1'.

    :returns: number of rows updated, or None when the filters do not
        match the model's fields.
    """
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(model, filters):
        return None
    query = context.session.query(model)
    if hasattr(model, 'deleted'):
        query = query.filter_by(deleted=False)
    # cluster_name and host are special filter cases
    for field in {'cluster_name', 'host'}.intersection(filters):
        value = filters.pop(field)
        # We do a special backend filter
        query = query.filter(_filter_host(getattr(model, field), value))
        # If we want to do a partial rename and we haven't set the cluster
        # already, the value we want to set is a SQL replace of existing field
        # value.
        if partial_rename and isinstance(cluster, str):
            # After this, 'cluster' is a SQL expression, so the isinstance
            # guard ensures the replace is only built once.
            cluster = func.replace(getattr(model, field), value, cluster)
    query = query.filter_by(**filters)
    result = query.update({'cluster_name': cluster}, synchronize_session=False)
    return result
@require_admin_context
@main_context_manager.writer
def volume_include_in_cluster(
    context, cluster, partial_rename=True, **filters
):
    """Include all volumes matching the filters into a cluster.

    :returns: number of volumes updated, or None when the filters do not
        match the Volume model's fields.
    """
    return _include_in_cluster(
        context, cluster, models.Volume, partial_rename, filters
    )
def _get_statuses_from_attachments(context, volume_id):
    """Get volume status and attach_status based on existing attachments.

    :returns: ``(volume_status, attach_status)`` tuple.
    """
    # NOTE: Current implementation ignores attachments on error attaching,
    # since they will not have been used by any consumer because os-brick's
    # connect_volume has not been called yet. This leads to cases where a
    # volume will be in 'available' state yet have attachments.
    # If we sort status of attachments alphabetically, ignoring errors, the
    # first element will be the attachment status for the volume:
    # attached > attaching > detaching > reserved
    attach_status = (
        context.session.query(models.VolumeAttachment.attach_status)
        .filter_by(deleted=False)
        .filter_by(volume_id=volume_id)
        .filter(~models.VolumeAttachment.attach_status.startswith('error_'))
        .order_by(models.VolumeAttachment.attach_status.asc())
        .limit(1)
        .scalar()
    )
    # No volume attachment records means the volume is detached.
    attach_status = attach_status or 'detached'
    # Check cases where volume status is different from attach status, and
    # default to the same value if it's not one of those cases.
    status = ATTACH_STATUS_MAP.get(attach_status, attach_status)
    return status, attach_status
@require_admin_context
@main_context_manager.writer
def volume_detached(context, volume_id, attachment_id):
    """Delete an attachment and update the volume accordingly.

    After marking the attachment as detached the method will decide the status
    and attach_status values for the volume based on the current status and the
    remaining attachments and their status.

    Volume status may be changed to: in-use, attaching, detaching, reserved, or
    available.

    Volume attach_status will be changed to one of: attached, attaching,
    detaching, reserved, or detached.

    :returns: ``(volume_updates, attachment_updates)`` tuple;
        attachment_updates is None when the attachment no longer exists.
    """
    # NOTE(jdg): This is a funky band-aid for the earlier attempts at
    # multiattach, it's a bummer because these things aren't really being used
    # but at the same time we don't want to break them until we work out the
    # new proposal for multi-attach
    # Only load basic volume info necessary to check various status and use
    # the volume row as a lock with the for_update.
    volume = _volume_get(
        context,
        volume_id,
        joined_load=False,
        for_update=True,
    )
    try:
        attachment = _attachment_get(context, attachment_id)
        attachment_updates = attachment.delete(context.session)
    except exception.VolumeAttachmentNotFound:
        # The attachment may already be gone; still recompute the
        # volume's status from the attachments that remain.
        attachment_updates = None
    status, attach_status = _get_statuses_from_attachments(context, volume_id)
    volume_updates = {
        'updated_at': volume.updated_at,
        'attach_status': attach_status,
    }
    # Hide volume status update to available on volume migration or upload,
    # as status is updated later on those flows.
    if (
        attach_status != 'detached'
        or (not volume.migration_status and volume.status != 'uploading')
        or volume.migration_status in ('success', 'error')
    ):
        volume_updates['status'] = status
    volume.update(volume_updates)
    volume.save(context.session)
    del volume_updates['updated_at']
    return volume_updates, attachment_updates
def _process_model_like_filter(model, query, filters):
"""Applies regex expression filtering to a query.
:param model: model to apply filters to
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
:returns: the updated query.
"""
if query is None:
return query
for key in sorted(filters):
column_attr = getattr(model, key)
if 'property' == type(column_attr).__name__:
continue
value = filters[key]
if not (isinstance(value, (str, int))):
continue
query = query.filter(column_attr.op('LIKE')(u'%%%s%%' % value))
return query
def apply_like_filters(model):
    """Decorator factory separating fuzzy ('key~') filters from exact ones.

    The wrapped filter processor receives only the exact filters; keys
    ending in '~' are stripped of the trailing tilde(s) and applied
    afterwards as LIKE filters against *model* via
    _process_model_like_filter.
    """
    def decorator_filters(process_exact_filters):
        def _decorator(query, filters):
            # NOTE(tommylikehu): For inexact match, the filter keys
            # are in the format of 'key~=value'
            exact = {
                k: v for k, v in filters.items() if not k.endswith('~')
            }
            fuzzy = {
                k.rstrip('~'): v
                for k, v in filters.items()
                if k.endswith('~')
            }
            query = process_exact_filters(query, exact)
            return _process_model_like_filter(model, query, fuzzy)
        return _decorator
    return decorator_filters
@require_context
def _volume_get_query(context, project_only=False, joined_load=True):
    """Build the base query used to retrieve volumes.

    :param context: the context used to run the query
    :param project_only: when True restrict the query to the context's
        project instead of all projects
    :param joined_load: when True eagerly load the models related to
        Volume; False is used e.g. while updating the database during
        volume migration
    :returns: a volume query
    """
    query = model_query(context, models.Volume, project_only=project_only)
    if not joined_load:
        return query
    relationships = [
        models.Volume.volume_metadata,
        models.Volume.volume_type,
        models.Volume.volume_attachment,
        models.Volume.consistencygroup,
        models.Volume.group,
    ]
    # Admin metadata is only loaded for admin contexts.
    if is_admin_context(context):
        relationships.insert(1, models.Volume.volume_admin_metadata)
    for relationship in relationships:
        query = query.options(joinedload(relationship))
    return query
@require_context
def _volume_get(context, volume_id, joined_load=True, for_update=False):
    """Fetch a single volume by id.

    :param joined_load: when True also eagerly load the volume type's
        extra specs (on top of the default joined relationships)
    :param for_update: when True lock the row (SELECT ... FOR UPDATE)
    :raises: VolumeNotFound when no matching volume exists
    """
    query = _volume_get_query(
        context, project_only=True, joined_load=joined_load
    )
    if joined_load:
        query = query.options(
            joinedload(models.Volume.volume_type).joinedload(
                models.VolumeType.extra_specs
            )
        )
    if for_update:
        query = query.with_for_update()
    volume = query.filter_by(id=volume_id).first()
    if volume is None:
        raise exception.VolumeNotFound(volume_id=volume_id)
    return volume
def _attachment_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Return VolumeAttachment rows matching the filters, paginated.

    Returns an empty list when the filter keys are not valid model
    fields ('project_id' is allowed as a special case) or when no query
    could be generated.
    """
    filters_ok = not filters or is_valid_model_filters(
        models.VolumeAttachment,
        filters,
        exclude_list=['project_id'],
    )
    if not filters_ok:
        return []
    # Build the paginated query; None means nothing can match.
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.VolumeAttachment,
    )
    return [] if query is None else query.all()
def _attachment_get(
    context,
    attachment_id,
    read_deleted=False,
    project_only=True,
):
    """Fetch one attachment (with its volume eagerly loaded) by id.

    NOTE: ``project_only`` is currently accepted but not applied to the
    query.

    :raises: VolumeAttachmentNotFound when no matching row exists
    """
    query = model_query(
        context, models.VolumeAttachment, read_deleted=read_deleted
    ).filter_by(id=attachment_id)
    attachment = query.options(
        joinedload(models.VolumeAttachment.volume)
    ).first()
    if attachment is None:
        raise exception.VolumeAttachmentNotFound(
            filter='attachment_id = %s' % attachment_id,
        )
    return attachment
def _attachment_get_query(context, project_only=False):
    """Base attachment query with the related volume eagerly loaded."""
    query = model_query(
        context,
        models.VolumeAttachment,
        project_only=project_only,
    )
    return query.options(joinedload(models.VolumeAttachment.volume))
@apply_like_filters(model=models.VolumeAttachment)
def _process_attachment_filters(query, filters):
    """Apply attachment filters to a query.

    'project_id' is handled specially by joining against the volume
    table; all other keys must be columns of VolumeAttachment.

    :returns: the filtered query, or None when a filter key is invalid
    """
    if filters:
        project_id = filters.pop('project_id', None)
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.VolumeAttachment, filters):
            # Bare return -> None signals "no results possible" to callers.
            return
        if project_id:
            volume = models.Volume
            query = query.filter(
                volume.id == models.VolumeAttachment.volume_id,
                volume.project_id == project_id,
            )
        query = query.filter_by(**filters)
    return query
@require_admin_context
@main_context_manager.reader
def volume_attachment_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieve all Attachment records with filter and pagination options.

    :returns: list of matching attachments (possibly empty).
    """
    return _attachment_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@require_context
@main_context_manager.reader
def volume_attachment_get_all_by_volume_id(context, volume_id):
    """Return all non-detached attachments for the given volume."""
    query = model_query(context, models.VolumeAttachment).filter_by(
        volume_id=volume_id
    )
    # Detached attachments are historical records; exclude them.
    query = query.filter(
        models.VolumeAttachment.attach_status
        != fields.VolumeAttachStatus.DETACHED
    )
    return query.options(joinedload(models.VolumeAttachment.volume)).all()
# FIXME(jdg): Not using filters
@require_context
@main_context_manager.reader
def volume_attachment_get_all_by_host(context, host, filters=None):
    """Return all non-detached attachments made from the given host."""
    query = model_query(context, models.VolumeAttachment).filter_by(
        attached_host=host
    )
    # Detached attachments are historical records; exclude them.
    query = query.filter(
        models.VolumeAttachment.attach_status
        != fields.VolumeAttachStatus.DETACHED
    )
    return query.options(joinedload(models.VolumeAttachment.volume)).all()
@require_context
@main_context_manager.reader
def volume_attachment_get(context, attachment_id):
    """Fetch the specified attachment record.

    :raises: VolumeAttachmentNotFound if it does not exist.
    """
    return _attachment_get(context, attachment_id)
# FIXME(jdg): Not using filters
@require_context
@main_context_manager.reader
def volume_attachment_get_all_by_instance_uuid(
    context,
    instance_uuid,
    filters=None,
):
    """Fetch all attachment records associated with the specified instance."""
    query = model_query(context, models.VolumeAttachment).filter_by(
        instance_uuid=instance_uuid
    )
    # Detached attachments are historical records; exclude them.
    query = query.filter(
        models.VolumeAttachment.attach_status
        != fields.VolumeAttachStatus.DETACHED
    )
    return query.options(joinedload(models.VolumeAttachment.volume)).all()
@require_context
@main_context_manager.reader
def volume_attachment_get_all_by_project(
    context,
    project_id,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieve all Attachment records for a specific project."""
    authorize_project_context(context, project_id)
    # Inject the project filter without mutating the caller's dict.
    effective_filters = dict(filters) if filters else {}
    effective_filters['project_id'] = project_id
    return _attachment_get_all(
        context,
        effective_filters,
        marker,
        limit,
        offset,
        sort_keys,
        sort_dirs,
    )
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def attachment_destroy(context, attachment_id):
    """Destroy the specified attachment record.

    Soft-deletes the attachment and any associated attachment specs.

    :returns: dict of values written to the attachment row (minus
        ``updated_at``), useful for building notifications.
    """
    utcnow = timeutils.utcnow()
    query = model_query(context, models.VolumeAttachment).filter_by(
        id=attachment_id
    )
    # NOTE(review): passing the entity's updated_at column expression
    # through update() appears intended to keep updated_at unchanged —
    # confirm against model defaults.
    entity = query.column_descriptions[0]['entity']
    updated_values = {
        'attach_status': fields.VolumeAttachStatus.DELETED,
        'deleted': True,
        'deleted_at': utcnow,
        'updated_at': entity.updated_at,
    }
    query.update(updated_values)
    # Cascade the soft delete to the attachment's specs.
    query = model_query(context, models.AttachmentSpecs).filter_by(
        attachment_id=attachment_id
    )
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': utcnow,
            'updated_at': entity.updated_at,
        }
    )
    del updated_values['updated_at']
    return updated_values
@require_context
@main_context_manager.writer
def attachment_specs_exist(context):
    """Return True when any non-deleted AttachmentSpecs row exists."""
    first_row = model_query(
        context, models.AttachmentSpecs, read_deleted='no'
    ).first()
    return first_row is not None
def _attachment_specs_query(context, attachment_id):
    """Base query for the non-deleted specs of one attachment."""
    query = model_query(context, models.AttachmentSpecs, read_deleted="no")
    return query.filter_by(attachment_id=attachment_id)
@require_context
@main_context_manager.reader
def attachment_specs_get(context, attachment_id):
    """DEPRECATED: Fetch the attachment_specs for the specified attachment."""
    specs = {}
    for row in _attachment_specs_query(context, attachment_id).all():
        specs[row['key']] = row['value']
    return specs
@require_context
@main_context_manager.writer
def attachment_specs_delete(context, attachment_id, key):
    """DEPRECATED: Delete attachment_specs for the specified attachment.

    :raises: AttachmentSpecsNotFound when the key does not exist.
    """
    # Raises if the spec does not exist; the returned row is unused.
    _attachment_specs_get_item(context, attachment_id, key)
    query = _attachment_specs_query(context, attachment_id).filter_by(key=key)
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            'updated_at': entity.updated_at,
        }
    )
@require_context
def _attachment_specs_get_item(context, attachment_id, key):
    """Return the spec row for ``(attachment_id, key)``.

    :raises: AttachmentSpecsNotFound when the key does not exist.
    """
    query = _attachment_specs_query(context, attachment_id)
    row = query.filter_by(key=key).first()
    if row is None:
        raise exception.AttachmentSpecsNotFound(
            specs_key=key,
            attachment_id=attachment_id,
        )
    return row
@handle_db_data_error
@require_context
@main_context_manager.writer
def attachment_specs_update_or_create(context, attachment_id, specs):
    """DEPRECATED: Update attachment_specs for the specified attachment.

    Each key/value pair in ``specs`` is upserted: existing rows are
    updated in place, missing ones are created.

    :returns: the ``specs`` dict that was passed in.
    """
    spec_ref = None
    for key, value in specs.items():
        try:
            # EAFP: fetch the existing row; fall back to creating one.
            spec_ref = _attachment_specs_get_item(context, attachment_id, key)
        except exception.AttachmentSpecsNotFound:
            spec_ref = models.AttachmentSpecs()
        spec_ref.update(
            {
                "key": key,
                "value": value,
                "attachment_id": attachment_id,
                "deleted": False,
            }
        )
        spec_ref.save(context.session)
    return specs
@require_context
@main_context_manager.reader
def volume_get(context, volume_id):
    """Return the volume with the given id.

    :raises: VolumeNotFound if the volume does not exist.
    """
    return _volume_get(context, volume_id)
@require_admin_context
@main_context_manager.reader
def volume_get_all(
    context,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    filters=None,
    offset=None,
):
    """Retrieve all volumes, optionally filtered, sorted and paginated.

    When no sort parameters are given, results are ordered by the
    'created_at' key and then the 'id' key, both descending.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine
        the next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be
        sorted, paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be
        sorted, paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists,
        tuples, or sets cause an 'IN' operation, while exact matching is
        used for other values, see _process_volume_filters for details
    :returns: list of matching volumes
    """
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.Volume,
    )
    # A None query means the filters can never match anything.
    return [] if query is None else query.all()
@require_context
@main_context_manager.reader
def get_volume_summary(context, project_only, filters=None):
    """Retrieves all volumes summary.

    :param context: context to query under
    :param project_only: limit summary to project volumes
    :param filters: dictionary of filters; values that are in lists, tuples,
        or sets cause an 'IN' operation, while exact matching is used for
        other values, see _process_volume_filters function for more
        information
    :returns: ``(count, total_size, metadata)`` tuple, where metadata maps
        each metadata key to the list of its distinct values
    :raises: AdminRequired for a non-admin context without project_only
    """
    if not (project_only or is_admin_context(context)):
        raise exception.AdminRequired()
    query = model_query(
        context,
        func.count(models.Volume.id),
        func.sum(models.Volume.size),
        read_deleted="no",
    )
    if project_only:
        query = query.filter_by(project_id=context.project_id)
    if filters:
        query = _process_volume_filters(query, filters)
    # Filters that can never match produce a None query.
    if query is None:
        return []
    result = query.first()
    # Collect the distinct metadata key/value pairs for the summary.
    query_metadata = model_query(
        context,
        models.VolumeMetadata.key,
        models.VolumeMetadata.value,
        read_deleted="no",
    )
    if project_only:
        query_metadata = query_metadata.join(
            models.Volume, models.Volume.id == models.VolumeMetadata.volume_id
        ).filter_by(project_id=context.project_id)
    result_metadata = query_metadata.distinct().all()
    # Group the distinct values under their metadata keys.
    result_metadata_list = collections.defaultdict(list)
    for key, value in result_metadata:
        result_metadata_list[key].append(value)
    return (result[0] or 0, result[1] or 0, result_metadata_list)
@require_admin_context
@main_context_manager.reader
def volume_get_all_by_host(context, host, filters=None):
    """Retrieves all volumes hosted on a host.

    :param context: context to query under
    :param host: host for all volumes being retrieved
    :param filters: dictionary of filters; values that are in lists, tuples,
        or sets cause an 'IN' operation, while exact matching is used for
        other values, see _process_volume_filters function for more
        information
    :returns: list of matching volumes (always a list, possibly empty)
    """
    # As a side effect of the introduction of pool-aware scheduler,
    # newly created volumes will have pool information appended to
    # 'host' field of a volume record. So a volume record in DB can
    # now be either form below:
    #     Host
    #     Host#Pool
    if not host or not isinstance(host, str):
        # BUGFIX: the previous implementation fell off the end (returning
        # None) for a truthy non-string host; always return a list so
        # callers can safely iterate the result.
        return []
    host_attr = models.Volume.host
    conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')]
    query = _volume_get_query(context).filter(or_(*conditions))
    if filters:
        query = _process_volume_filters(query, filters)
        # No volumes would match, return empty list
        if query is None:
            return []
    return query.all()
@require_context
@main_context_manager.reader
def volume_get_all_by_group(context, group_id, filters=None):
    """Retrieve all volumes in the given consistency group.

    :param context: context to query under
    :param group_id: consistency group ID for all volumes being retrieved
    :param filters: dictionary of filters; values that are in lists,
        tuples, or sets cause an 'IN' operation, while exact matching is
        used for other values, see _process_volume_filters for details
    :returns: list of matching volumes
    """
    query = _volume_get_query(context).filter_by(consistencygroup_id=group_id)
    if filters:
        query = _process_volume_filters(query, filters)
        # Filters that can never match produce a None query.
        if query is None:
            return []
    return query.all()
@require_admin_context
@main_context_manager.writer
def volume_update_all_by_service(context):
    """Ensure volumes have the correct service_uuid value for their host.

    In some deployment tools, when performing an upgrade, all service records
    are recreated including c-vol service which gets a new record in the
    services table, though its host name is constant. Later we then delete the
    old service record.

    As a consequence, the volumes have the right host name but the service
    UUID needs to be updated to the ID of the new service record.

    :param context: context to query under
    """
    # Get all cinder-volume services
    services = service_get_all(context, binary='cinder-volume')
    for service in services:
        # Update only volumes on this service's host whose service_uuid
        # differs from the current service record's uuid.
        query = model_query(context, models.Volume)
        query = query.filter(
            _filter_host(
                models.Volume.host, service.host),
            models.Volume.service_uuid != service.uuid)
        query.update(
            {"service_uuid": service.uuid}, synchronize_session=False)
@require_context
@main_context_manager.reader
def volume_get_all_by_generic_group(context, group_id, filters=None):
    """Retrieve all volumes in the given (generic) group.

    :param context: context to query under
    :param group_id: group ID for all volumes being retrieved
    :param filters: dictionary of filters; values that are in lists,
        tuples, or sets cause an 'IN' operation, while exact matching is
        used for other values, see _process_volume_filters for details
    :returns: list of matching volumes
    """
    query = _volume_get_query(context).filter_by(group_id=group_id)
    if filters:
        query = _process_volume_filters(query, filters)
        # Filters that can never match produce a None query.
        if query is None:
            return []
    return query.all()
@require_context
@main_context_manager.reader
def volume_get_all_by_project(
    context,
    project_id,
    marker,
    limit,
    sort_keys=None,
    sort_dirs=None,
    filters=None,
    offset=None,
):
    """Retrieve all volumes in a project, filtered, sorted and paginated.

    When no sort parameters are given, results are ordered by the
    'created_at' key and then the 'id' key, both descending.

    :param context: context to query under
    :param project_id: project for all volumes being retrieved
    :param marker: the last item of the previous page, used to determine
        the next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be
        sorted, paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be
        sorted, paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists,
        tuples, or sets cause an 'IN' operation, while exact matching is
        used for other values, see _process_volume_filters for details
    :returns: list of matching volumes
    """
    authorize_project_context(context, project_id)
    # Inject the project filter without mutating the caller's dict.
    effective_filters = dict(filters) if filters else {}
    effective_filters['project_id'] = project_id
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        effective_filters,
        offset,
        models.Volume,
    )
    # A None query means the filters can never match anything.
    return [] if query is None else query.all()
def _generate_paginate_query(
    context,
    marker,
    limit,
    sort_keys,
    sort_dirs,
    filters,
    offset=None,
    paginate_type=models.Volume,
):
    """Build a query combining the given filters with pagination options.

    :param context: context to query under
    :param marker: the last item of the previous page; results after this
        value are returned
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be
        sorted, paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be
        sorted, paired with corresponding item in sort_keys
    :param filters: dictionary of filters; list/tuple/set values produce
        an 'IN' operation while other values are matched exactly (see
        _process_volume_filters for details)
    :param offset: number of items to skip
    :param paginate_type: model the pagination applies to
    :returns: query with sorting/pagination applied, or None when the
        filters cannot match any row
    """
    get_query, process_filters, get = PAGINATION_HELPERS[paginate_type]
    sort_keys, sort_dirs = process_sort_params(
        sort_keys, sort_dirs, default_dir='desc'
    )
    query = get_query(context)
    if filters:
        query = process_filters(query, filters)
    if query is None:
        return None
    marker_object = get(context, marker) if marker is not None else None
    return sqlalchemyutils.paginate_query(
        query,
        paginate_type,
        limit,
        sort_keys,
        marker=marker_object,
        sort_dirs=sort_dirs,
        offset=offset,
    )
@main_context_manager.reader
def calculate_resource_count(context, resource_type, filters):
    """Return the total number of *resource_type* rows matching filters.

    :raises: InvalidInput when the resource type does not support counting
    """
    try:
        get_query, process_filters = CALCULATE_COUNT_HELPERS[resource_type]
    except KeyError:
        msg = _("Model %s doesn't support counting resource.")
        raise exception.InvalidInput(reason=msg % resource_type)
    query = get_query(context, joined_load=False)
    if filters:
        query = process_filters(query, filters)
    if query is None:
        return 0
    return query.with_entities(func.count()).scalar()
@apply_like_filters(model=models.Volume)
def _process_volume_filters(query, filters):
    """Common filter processing for Volume queries.

    Filter values that are in lists, tuples, or sets cause an 'IN' operator
    to be used, while exact matching ('==' operator) is used for other values.

    A filter key/value of 'no_migration_targets'=True causes volumes with
    either a NULL 'migration_status' or a 'migration_status' that does not
    start with 'target:' to be retrieved.

    A 'metadata' filter key must correspond to a dictionary value of metadata
    key-value pairs.

    :param query: Model query to use
    :param filters: dictionary of filters
    :returns: updated query or None
    """
    # Work on a copy: special-case keys are popped off before the generic
    # exact-match handling below.
    filters = filters.copy()
    # 'no_migration_targets' is unique, must be either NULL or
    # not start with 'target:'
    if filters.get('no_migration_targets', False):
        filters.pop('no_migration_targets')
        try:
            column_attr = getattr(models.Volume, 'migration_status')
            conditions = [
                column_attr == None, # noqa
                column_attr.op('NOT LIKE')('target:%'),
            ]
            query = query.filter(or_(*conditions))
        except AttributeError:
            LOG.debug("'migration_status' column could not be found.")
            return None
    # host/cluster_name match both exact and pool-qualified values.
    host = filters.pop('host', None)
    if host:
        query = query.filter(_filter_host(models.Volume.host, host))
    cluster_name = filters.pop('cluster_name', None)
    if cluster_name:
        query = query.filter(
            _filter_host(models.Volume.cluster_name, cluster_name),
        )
    for time_comparison_filter in ['created_at', 'updated_at']:
        if filters.get(time_comparison_filter, None):
            time_filter_dict = filters.pop(time_comparison_filter)
            try:
                query = query.filter(
                    _filter_time_comparison(
                        getattr(models.Volume, time_comparison_filter),
                        time_filter_dict,
                    ),
                )
            except AttributeError:
                LOG.debug(
                    "%s column could not be found.",
                    time_comparison_filter,
                )
                return None
    # Apply exact match filters for everything else, ensure that the
    # filter value exists on the model
    # (first pass: validation only; any invalid key aborts with None).
    for key in filters.keys():
        # metadata/glance_metadata is unique, must be a dict
        if key in ('metadata', 'glance_metadata'):
            if not isinstance(filters[key], dict):
                LOG.debug("'%s' filter value is not valid.", key)
                return None
            continue
        try:
            column_attr = getattr(models.Volume, key)
            # Do not allow relationship properties since those require
            # schema specific knowledge
            prop = getattr(column_attr, 'property')
            if isinstance(prop, RelationshipProperty):
                LOG.debug(
                    "'%s' filter key is not valid, it maps to a relationship.",
                    key,
                )
                return None
        except AttributeError:
            LOG.debug("'%s' filter key is not valid.", key)
            return None
    # Holds the simple exact matches
    filter_dict = {}
    # Iterate over all filters, special case the filter if necessary
    # (second pass: apply the now-validated filters).
    for key, value in filters.items():
        if key == 'metadata':
            # model.VolumeMetadata defines the backref to Volumes as
            # 'volume_metadata' or 'volume_admin_metadata', use those as
            # column attribute keys
            col_attr = getattr(models.Volume, 'volume_metadata')
            col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
            for k, v in value.items():
                query = query.filter(
                    or_(
                        col_attr.any(key=k, value=v),
                        col_ad_attr.any(key=k, value=v),
                    )
                )
        elif key == 'glance_metadata':
            # use models.Volume.volume_glance_metadata as column attribute key.
            col_gl_attr = models.Volume.volume_glance_metadata
            for k, v in value.items():
                query = query.filter(col_gl_attr.any(key=k, value=v))
        elif isinstance(value, (list, tuple, set, frozenset)):
            # Looking for values in a list; apply to query directly
            column_attr = getattr(models.Volume, key)
            query = query.filter(column_attr.in_(value))
        else:
            # OK, simple exact match; save for later
            filter_dict[key] = value
    # Apply simple exact matches
    if filter_dict:
        query = query.filter_by(**filter_dict)
    return query
def process_sort_params(
    sort_keys,
    sort_dirs,
    default_keys=None,
    default_dir='asc',
):
    """Process the sort parameters to include default keys.

    Creates a list of sort keys and a list of sort directions. Adds the
    default keys to the end of the list if they are not already included.

    When adding the default keys to the sort keys list, the associated
    direction is:

    1) The first element in the 'sort_dirs' list (if specified), else
    2) 'default_dir' value (Note that 'asc' is the default value since this is
       the default in sqlalchemy.utils.paginate_query)

    :param sort_keys: List of sort keys to include in the processed list
    :param sort_dirs: List of sort directions to include in the processed list
    :param default_keys: List of sort keys that need to be included in the
        processed list, they are added at the end of the list if not already
        specified.
    :param default_dir: Sort direction associated with each of the default
        keys that are not supplied, used when they are added to the processed
        list
    :returns: list of sort keys, list of sort directions
    :raise exception.InvalidInput: If more sort directions than sort keys
        are specified or if an invalid sort direction is specified
    """
    if default_keys is None:
        default_keys = ['created_at', 'id']

    # Determine direction to use for when adding default keys.
    # (An empty list is already falsy, so no separate len() check is needed.)
    if sort_dirs:
        default_dir_value = sort_dirs[0]
    else:
        default_dir_value = default_dir

    # Create list of keys (do not modify the input list)
    result_keys = list(sort_keys) if sort_keys else []

    # If a list of directions is not provided, use the default sort direction
    # for all provided keys.
    if sort_dirs:
        result_dirs = []
        # Verify sort direction
        for sort_dir in sort_dirs:
            if sort_dir not in ('asc', 'desc'):
                msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
                raise exception.InvalidInput(reason=msg)
            result_dirs.append(sort_dir)
    else:
        result_dirs = [default_dir_value for _sort_key in result_keys]

    # Ensure that the key and direction length match
    while len(result_dirs) < len(result_keys):
        result_dirs.append(default_dir_value)
    # Unless more direction are specified, which is an error
    if len(result_dirs) > len(result_keys):
        msg = _("Sort direction array size exceeds sort key array size.")
        raise exception.InvalidInput(reason=msg)

    # Ensure defaults are included
    for key in default_keys:
        if key not in result_keys:
            result_keys.append(key)
            result_dirs.append(default_dir_value)

    return result_keys, result_dirs
@handle_db_data_error
@require_context
@main_context_manager.writer
def volume_update(context, volume_id, values):
    """Update a volume row, routing metadata to the metadata tables.

    :raises exception.VolumeNotFound: if no row matches volume_id
    """
    # Metadata lives in dedicated tables, so pop it out of ``values``
    # before the bulk UPDATE on the volumes table.
    if values.get('metadata') is not None:
        _volume_user_metadata_update(
            context, volume_id, values.pop('metadata'), delete=True
        )
    if is_admin_context(context) and values.get('admin_metadata') is not None:
        _volume_admin_metadata_update(
            context, volume_id, values.pop('admin_metadata'), delete=True
        )

    updated = (
        _volume_get_query(context, joined_load=False)
        .filter_by(id=volume_id)
        .update(values)
    )
    if not updated:
        raise exception.VolumeNotFound(volume_id=volume_id)
@handle_db_data_error
@require_context
@main_context_manager.writer
def volumes_update(context, values_list):
    """Update multiple volumes in one session; return the updated refs."""
    updated_refs = []
    for values in values_list:
        vol_id = values.pop('id')

        # Metadata lives in dedicated tables; update through the helpers
        # rather than the volumes table itself.
        if values.get('metadata') is not None:
            _volume_user_metadata_update(
                context, vol_id, values.pop('metadata'), delete=True
            )
        if (
            is_admin_context(context)
            and values.get('admin_metadata') is not None
        ):
            _volume_admin_metadata_update(
                context, vol_id, values.pop('admin_metadata'), delete=True
            )

        vol_ref = _volume_get(context, vol_id)
        vol_ref.update(values)
        updated_refs.append(vol_ref)
    return updated_refs
@require_context
@main_context_manager.writer
def volume_attachment_update(context, attachment_id, values):
    """Apply ``values`` to an attachment row; raise if it does not exist."""
    updated = (
        model_query(context, models.VolumeAttachment)
        .filter_by(id=attachment_id)
        .update(values)
    )
    if not updated:
        raise exception.VolumeAttachmentNotFound(
            filter='attachment_id = ' + attachment_id
        )
@main_context_manager.writer
def volume_update_status_based_on_attachment(context, volume_id):
    """Update volume status based on attachment.

    Get volume and check if 'volume_attachment' parameter is present in
    volume. If 'volume_attachment' is None then set volume status to
    'available' else set volume status to 'in-use'.

    :param context: context to query under
    :param volume_id: id of volume to be updated
    :returns: updated volume
    """
    # Fetch and update within the same session so a concurrent delete
    # cannot slip in between the 'get' and the 'update'.
    volume_ref = _volume_get(context, volume_id)
    new_status = 'in-use' if volume_ref['volume_attachment'] else 'available'
    volume_ref.update({'status': new_status})
    return volume_ref
def volume_has_snapshots_filter():
    """Filter: the volume has at least one non-deleted snapshot."""
    snapshot_is_live = and_(
        models.Volume.id == models.Snapshot.volume_id,
        ~models.Snapshot.deleted,
    )
    return sql.exists().where(snapshot_is_live)
def volume_has_undeletable_snapshots_filter():
    """Filter: the volume has a live snapshot that cannot be deleted.

    A snapshot blocks deletion when it belongs to a cgsnapshot or a group
    snapshot, or when its status is not in the deletable set.
    """
    deletable_statuses = ['available', 'error']
    blocked_by_cgsnapshot = or_(
        models.Snapshot.cgsnapshot_id != None,  # noqa: != None
        models.Snapshot.status.notin_(deletable_statuses),
    )
    blocked_by_group_snapshot = or_(
        models.Snapshot.group_snapshot_id != None,  # noqa: != None
        models.Snapshot.status.notin_(deletable_statuses),
    )
    return sql.exists().where(
        and_(
            models.Volume.id == models.Snapshot.volume_id,
            ~models.Snapshot.deleted,
            blocked_by_cgsnapshot,
            blocked_by_group_snapshot,
        )
    )
def volume_has_snapshots_in_a_cgsnapshot_filter():
    """Filter: the volume has a snapshot that is part of a cgsnapshot."""
    snapshot_in_cgsnapshot = and_(
        models.Volume.id == models.Snapshot.volume_id,
        models.Snapshot.cgsnapshot_id.isnot(None),
    )
    return sql.exists().where(snapshot_in_cgsnapshot)
def volume_has_attachments_filter():
    """Filter: the volume has a live attachment that is not fully detached."""
    attachment_is_live = and_(
        models.Volume.id == models.VolumeAttachment.volume_id,
        models.VolumeAttachment.attach_status
        != fields.VolumeAttachStatus.DETACHED,
        ~models.VolumeAttachment.deleted,
    )
    return sql.exists().where(attachment_is_live)
def volume_qos_allows_retype(new_vol_type):
    """Filter to check that qos allows retyping the volume to new_vol_type.

    Returned sqlalchemy filter will evaluate to True when volume's status is
    available or when it's 'in-use' but the qos in new_vol_type is the same as
    the qos of the volume or when it doesn't exist a consumer spec key that
    specifies anything other than the back-end in any of the 2 volume_types.
    """
    # Query to get the qos of the volume type new_vol_type
    q = (
        sql.select(models.VolumeType.qos_specs_id)
        .where(
            and_(
                ~models.VolumeType.deleted,
                models.VolumeType.id == new_vol_type,
            )
        )
        .scalar_subquery()
    )
    # Construct the filter to check qos when volume is 'in-use'
    return or_(
        # If volume is available
        models.Volume.status == 'available',
        # Or both volume types have the same qos specs
        sql.exists().where(
            and_(
                ~models.VolumeType.deleted,
                models.VolumeType.id == models.Volume.volume_type_id,
                models.VolumeType.qos_specs_id == q,
            )
        ),
        # Or they are different specs but they are handled by the backend or
        # it is not specified. The way SQL evaluates value != 'back-end'
        # makes it result in False not only for 'back-end' values but for
        # NULL as well, and with the double negation we ensure that we only
        # allow QoS with 'consumer' values of 'back-end' and NULL.
        and_(
            # The volume's current type has no consumer spec other than
            # 'back-end' (missing/NULL counts as allowed, see note above).
            ~sql.exists().where(
                and_(
                    ~models.VolumeType.deleted,
                    models.VolumeType.id == models.Volume.volume_type_id,
                    (
                        models.VolumeType.qos_specs_id
                        == models.QualityOfServiceSpecs.specs_id
                    ),
                    models.QualityOfServiceSpecs.key == 'consumer',
                    models.QualityOfServiceSpecs.value != 'back-end',
                )
            ),
            # Same check for the target volume type.
            ~sql.exists().where(
                and_(
                    ~models.VolumeType.deleted,
                    models.VolumeType.id == new_vol_type,
                    (
                        models.VolumeType.qos_specs_id
                        == models.QualityOfServiceSpecs.specs_id
                    ),
                    models.QualityOfServiceSpecs.key == 'consumer',
                    models.QualityOfServiceSpecs.value != 'back-end',
                )
            ),
        ),
    )
def volume_has_other_project_snp_filter():
    """Filter: the volume has a snapshot owned by a different project."""
    cross_project_snapshot = and_(
        models.Volume.id == models.Snapshot.volume_id,
        models.Volume.project_id != models.Snapshot.project_id,
    )
    return sql.exists().where(cross_project_snapshot)
####################
def _volume_x_metadata_get_query(context, volume_id, model):
    """Build a query for the non-deleted metadata rows of one volume."""
    query = model_query(context, model, read_deleted="no")
    return query.filter_by(volume_id=volume_id)
def _volume_x_metadata_get(context, volume_id, model):
    """Return the volume's metadata from ``model`` as a {key: value} dict."""
    rows = _volume_x_metadata_get_query(context, volume_id, model).all()
    return {row['key']: row['value'] for row in rows}
def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec):
    """Return one metadata row, raising ``notfound_exec`` when missing."""
    query = _volume_x_metadata_get_query(context, volume_id, model)
    row = query.filter_by(key=key).first()
    if row:
        return row
    # Glance metadata "not found" exceptions take only the volume id, while
    # the other metadata exceptions take the key/volume pair.
    if model is models.VolumeGlanceMetadata:
        raise notfound_exec(id=volume_id)
    raise notfound_exec(metadata_key=key, volume_id=volume_id)
def _volume_x_metadata_update(
    context, volume_id, metadata, delete, model, add=True, update=True
):
    """Generic update for any of the volume metadata tables.

    :param metadata: dict of key/value pairs to apply
    :param delete: when True, soft-delete existing keys that are not present
        in ``metadata``
    :param model: metadata model class (user/admin/glance) to operate on
    :param add: when False, do not create rows for new keys
    :param update: when False, do not modify rows for existing keys
    :returns: resulting metadata as a {key: value} dict
    """
    # Work on a copy: matched keys are popped out of ``metadata`` below.
    metadata = metadata.copy()
    # Set existing metadata to deleted if delete argument is True. This is
    # committed immediately to the DB
    if delete:
        expected_values = {'volume_id': volume_id}
        # We don't want to delete keys we are going to update
        if metadata:
            expected_values['key'] = db.Not(metadata.keys())
        _conditional_update(
            context,
            model,
            {'deleted': True, 'deleted_at': timeutils.utcnow()},
            expected_values,
        )
    # Get existing metadata
    db_meta = _volume_x_metadata_get_query(context, volume_id, model).all()
    save = []
    skip = []
    # We only want to send changed metadata.
    for row in db_meta:
        if row.key in metadata:
            value = metadata.pop(row.key)
            if row.value != value and update:
                # ORM objects will not be saved until we do the bulk save
                row.value = value
                save.append(row)
                continue
        skip.append(row)
    # We also want to save non-existent metadata
    # (keys remaining in ``metadata`` had no matching row above).
    if add:
        save.extend(
            model(key=key, value=value, volume_id=volume_id)
            for key, value in metadata.items()
        )
    # Do a bulk save
    if save:
        context.session.bulk_save_objects(save, update_changed_only=True)
    # Construct result dictionary with current metadata
    save.extend(skip)
    result = {row['key']: row['value'] for row in save}
    return result
def _volume_user_metadata_get_query(context, volume_id):
    """Query for the user-visible metadata rows of the given volume."""
    return _volume_x_metadata_get_query(
        context, volume_id, model=models.VolumeMetadata
    )
def _volume_image_metadata_get_query(context, volume_id):
    """Query for the glance (image) metadata rows of the given volume."""
    return _volume_x_metadata_get_query(
        context, volume_id, model=models.VolumeGlanceMetadata
    )
@require_context
def _volume_user_metadata_get(context, volume_id):
    """Return the volume's user metadata as a {key: value} dict."""
    return _volume_x_metadata_get(
        context, volume_id, models.VolumeMetadata
    )
@require_context
def _volume_user_metadata_get_item(context, volume_id, key):
    """Return one user metadata row or raise VolumeMetadataNotFound."""
    return _volume_x_metadata_get_item(
        context,
        volume_id,
        key,
        model=models.VolumeMetadata,
        notfound_exec=exception.VolumeMetadataNotFound,
    )
@require_context
@require_volume_exists
def _volume_user_metadata_update(context, volume_id, metadata, delete):
    """Update the volume's user metadata table."""
    return _volume_x_metadata_update(
        context, volume_id, metadata, delete, model=models.VolumeMetadata
    )
@require_context
@require_volume_exists
def _volume_image_metadata_update(context, volume_id, metadata, delete):
    """Update the volume's glance (image) metadata table."""
    return _volume_x_metadata_update(
        context, volume_id, metadata, delete,
        model=models.VolumeGlanceMetadata,
    )
@require_context
def _volume_glance_metadata_key_to_id(context, volume_id, key):
    """Return the row id of the volume's glance metadata entry for ``key``.

    :raises KeyError: if the volume has no glance metadata entry for ``key``
        (preserved from the historical dict-lookup implementation)
    """
    db_data = volume_glance_metadata_get(context, volume_id)
    # Scan with early exit instead of materializing a filtered dict;
    # keys are unique per volume, so the first match is the only match.
    for meta_entry in db_data:
        if meta_entry.key == key:
            return meta_entry.id
    raise KeyError(key)
@require_context
@require_volume_exists
@main_context_manager.reader
def volume_metadata_get(context, volume_id):
    """Return the user metadata of the volume as a {key: value} dict."""
    metadata = _volume_user_metadata_get(context, volume_id)
    return metadata
@require_context
@require_volume_exists
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_metadata_delete(
    context,
    volume_id,
    key,
    meta_type=common.METADATA_TYPES.user,
):
    """Soft-delete one metadata item of the given type from a volume.

    :param meta_type: common.METADATA_TYPES.user or
        common.METADATA_TYPES.image
    :raises exception.InvalidMetadataType: for any other meta_type
    """
    # Build the query for the row to delete; the soft-delete update itself
    # is identical for both metadata types, so it is factored out below.
    if meta_type == common.METADATA_TYPES.user:
        query = _volume_user_metadata_get_query(context, volume_id).filter_by(
            key=key
        )
    elif meta_type == common.METADATA_TYPES.image:
        metadata_id = _volume_glance_metadata_key_to_id(
            context, volume_id, key
        )
        query = _volume_image_metadata_get_query(context, volume_id).filter_by(
            id=metadata_id
        )
    else:
        raise exception.InvalidMetadataType(
            metadata_type=meta_type, id=volume_id
        )
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            # Reassign updated_at to itself so the bulk update does not
            # bump the timestamp.
            'updated_at': entity.updated_at,
        }
    )
@require_context
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_metadata_update(
    context,
    volume_id,
    metadata,
    delete,
    meta_type=common.METADATA_TYPES.user,
):
    """Update user or image metadata for a volume; return the result dict."""
    # Pick the helper matching the requested metadata type.
    if meta_type == common.METADATA_TYPES.user:
        updater = _volume_user_metadata_update
    elif meta_type == common.METADATA_TYPES.image:
        updater = _volume_image_metadata_update
    else:
        raise exception.InvalidMetadataType(
            metadata_type=meta_type, id=volume_id
        )
    return updater(context, volume_id, metadata, delete)
###################
def _volume_admin_metadata_get_query(context, volume_id):
    """Query for the admin metadata rows of the given volume."""
    return _volume_x_metadata_get_query(
        context, volume_id, model=models.VolumeAdminMetadata
    )
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_get(context, volume_id):
    """Return the volume's admin metadata as a {key: value} dict."""
    return _volume_x_metadata_get(
        context, volume_id, models.VolumeAdminMetadata
    )
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_update(
    context, volume_id, metadata, delete, add=True, update=True
):
    """Update the volume's admin metadata table."""
    model = models.VolumeAdminMetadata
    return _volume_x_metadata_update(
        context, volume_id, metadata, delete, model, add=add, update=update
    )
@require_admin_context
@main_context_manager.reader
def volume_admin_metadata_get(context, volume_id):
    """Return the admin metadata of the volume as a {key: value} dict."""
    result = _volume_admin_metadata_get(context, volume_id)
    return result
@require_admin_context
@require_volume_exists
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_admin_metadata_delete(context, volume_id, key):
    """Soft-delete one admin metadata item from the given volume."""
    query = _volume_admin_metadata_get_query(context, volume_id).filter_by(
        key=key
    )
    entity = query.column_descriptions[0]['entity']
    soft_delete_values = {
        'deleted': True,
        'deleted_at': timeutils.utcnow(),
        # Reassign updated_at to itself so the bulk update does not bump it.
        'updated_at': entity.updated_at,
    }
    query.update(soft_delete_values)
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_admin_metadata_update(
    context, volume_id, metadata, delete, add=True, update=True
):
    """Update the admin metadata of a volume; return the result dict."""
    result = _volume_admin_metadata_update(
        context, volume_id, metadata, delete, add=add, update=update
    )
    return result
###################
@require_context
@handle_db_data_error
@main_context_manager.writer
def snapshot_create(context, values):
    """Create a snapshot row (plus its metadata rows) and return it."""
    values['snapshot_metadata'] = _metadata_refs(
        values.get('metadata'), models.SnapshotMetadata
    )
    # Generate an id up-front so the row can be re-read below.
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    snapshot = models.Snapshot()
    snapshot.update(values)
    context.session.add(snapshot)

    # Re-read through _snapshot_get so the volume and metadata
    # relationships come back eagerly loaded.
    return _snapshot_get(context, values['id'])
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def snapshot_destroy(context, snapshot_id):
    """Soft-delete a snapshot and its metadata; return the changed values."""
    now = timeutils.utcnow()

    snap_query = model_query(context, models.Snapshot).filter_by(
        id=snapshot_id
    )
    snap_entity = snap_query.column_descriptions[0]['entity']
    updated_values = {
        'status': 'deleted',
        'deleted': True,
        'deleted_at': now,
        # Reassign updated_at to itself so the bulk update does not bump it.
        'updated_at': snap_entity.updated_at,
    }
    snap_query.update(updated_values)

    meta_query = model_query(context, models.SnapshotMetadata).filter_by(
        snapshot_id=snapshot_id
    )
    meta_entity = meta_query.column_descriptions[0]['entity']
    meta_query.update(
        {
            'deleted': True,
            'deleted_at': now,
            'updated_at': meta_entity.updated_at,
        }
    )

    # Callers only care about the logical changes, not the updated_at trick.
    del updated_values['updated_at']
    return updated_values
@require_context
def _snapshot_get(context, snapshot_id):
    """Fetch one snapshot with volume and metadata eagerly loaded."""
    query = model_query(context, models.Snapshot, project_only=True)
    query = query.options(joinedload(models.Snapshot.volume))
    query = query.options(joinedload(models.Snapshot.snapshot_metadata))
    result = query.filter_by(id=snapshot_id).first()
    if not result:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
    return result
@require_context
@main_context_manager.reader
def snapshot_get(context, snapshot_id):
    """Return the snapshot with the given id inside a reader session."""
    snapshot = _snapshot_get(context, snapshot_id)
    return snapshot
@require_admin_context
@main_context_manager.reader
def snapshot_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    offset=None,
):
    """Retrieves all snapshots.

    If no sorting parameters are specified then returned snapshots are
    sorted first by the 'created_at' key and then by the 'id' key in
    descending order.

    :param context: context to query under
    :param filters: dictionary of filters; will do exact matching on values.
        Special keys host and cluster_name refer to the volume.
    :param marker: the last item of the previous page, used to determine the
        next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
        paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
        paired with corresponding item in sort_keys
    :returns: list of matching snapshots
    """
    # These keys live on the volume, not the snapshot, so they are excluded
    # from the model-filter validation.
    non_model_keys = ('host', 'cluster_name', 'availability_zone')
    if filters and not is_valid_model_filters(
        models.Snapshot, filters, exclude_list=non_model_keys
    ):
        return []

    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.Snapshot,
    )
    # A missing query means no snapshot could possibly match the filters.
    return query.all() if query else []
def _snaps_get_query(
    context,
    project_only=False,
    joined_load=True,
):
    """Base snapshot query, optionally eager-loading snapshot metadata."""
    query = model_query(context, models.Snapshot, project_only=project_only)
    if not joined_load:
        return query
    return query.options(joinedload(models.Snapshot.snapshot_metadata))
@apply_like_filters(model=models.Snapshot)
def _process_snaps_filters(query, filters):
    """Apply snapshot filters to ``query``.

    Supports the 'metadata' dict filter, the volume-based 'host',
    'cluster_name' and 'availability_zone' filters, and exact matches on
    any other Snapshot column.

    :returns: the filtered query, or None when a filter key/value is invalid
    """
    if filters:
        filters = filters.copy()

        exclude_list = ('host', 'cluster_name', 'availability_zone')

        # Ensure that filters' keys exist on the model or is metadata
        for key in filters.keys():
            # Ensure if filtering based on metadata filter is queried
            # then the filters value is a dictionary
            if key == 'metadata':
                if not isinstance(filters[key], dict):
                    LOG.debug("Metadata filter value is not valid dictionary")
                    return None
                continue

            if key in exclude_list:
                continue

            # for keys in filter other than metadata and exclude_list
            # ensure that the keys are in Snapshot model
            try:
                column_attr = getattr(models.Snapshot, key)
                prop = getattr(column_attr, 'property')
                if isinstance(prop, RelationshipProperty):
                    LOG.debug(
                        "'%s' key is not valid, it maps to a relationship.",
                        key,
                    )
                    return None
            except AttributeError:
                LOG.debug("'%s' filter key is not valid.", key)
                return None

        # filter handling for host and cluster name
        # (these attributes live on the volume, hence the join)
        host = filters.pop('host', None)
        cluster = filters.pop('cluster_name', None)
        az = filters.pop('availability_zone', None)
        if host or cluster or az:
            query = query.join(models.Snapshot.volume)
            vol_field = models.Volume
            if host:
                query = query.filter(_filter_host(vol_field.host, host))
            if cluster:
                query = query.filter(
                    _filter_host(vol_field.cluster_name, cluster)
                )
            if az:
                query = query.filter_by(availability_zone=az)

        filters_dict = {}
        LOG.debug("Building query based on filter")
        for key, value in filters.items():
            if key == 'metadata':
                col_attr = getattr(models.Snapshot, 'snapshot_metadata')
                for k, v in value.items():
                    query = query.filter(col_attr.any(key=k, value=v))
            else:
                filters_dict[key] = value

        # Apply exact matches
        if filters_dict:
            query = query.filter_by(**filters_dict)
    return query
@require_context
@main_context_manager.reader
def snapshot_get_all_for_volume(context, volume_id):
    """Return all non-deleted snapshots of the given volume."""
    query = model_query(
        context, models.Snapshot, read_deleted='no', project_only=True
    )
    query = query.filter_by(volume_id=volume_id)
    query = query.options(joinedload(models.Snapshot.snapshot_metadata))
    return query.all()
@require_context
@main_context_manager.reader
def snapshot_get_latest_for_volume(context, volume_id):
    """Return the most recently created snapshot of the given volume."""
    query = model_query(
        context, models.Snapshot, read_deleted='no', project_only=True
    )
    latest = (
        query.filter_by(volume_id=volume_id)
        .options(joinedload(models.Snapshot.snapshot_metadata))
        .order_by(desc(models.Snapshot.created_at))
        .first()
    )
    if not latest:
        raise exception.VolumeSnapshotNotFound(volume_id=volume_id)
    return latest
@require_context
@main_context_manager.reader
def snapshot_get_all_by_host(context, host, filters=None):
    """Return snapshots whose volume lives on ``host`` (any pool).

    :param host: backend host name; 'Host' matches both 'Host' and
        'Host#Pool' volume records
    :param filters: optional dict of exact-match snapshot filters
    :returns: list of matching snapshots; empty when host is falsy, not a
        string, or a filter key is invalid
    """
    if filters and not is_valid_model_filters(models.Snapshot, filters):
        return []

    # BUGFIX: previously a truthy non-string host fell through both
    # branches and the function implicitly returned None; every path now
    # returns a list.
    if not host or not isinstance(host, str):
        return []

    query = model_query(
        context, models.Snapshot, read_deleted='no', project_only=True
    )
    if filters:
        query = query.filter_by(**filters)

    # As a side effect of the introduction of pool-aware scheduler,
    # newly created volumes will have pool information appended to
    # 'host' field of a volume record. So a volume record in DB can
    # now be either form below:
    #     Host
    #     Host#Pool
    host_attr = models.Volume.host
    conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')]
    query = (
        query.join(models.Snapshot.volume)
        .filter(or_(*conditions))
        .options(joinedload(models.Snapshot.snapshot_metadata))
    )
    return query.all()
@require_context
@main_context_manager.reader
def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
    """Return all non-deleted snapshots that belong to a cgsnapshot."""
    query = model_query(
        context, models.Snapshot, read_deleted='no', project_only=True
    ).filter_by(cgsnapshot_id=cgsnapshot_id)
    query = query.options(joinedload(models.Snapshot.volume))
    query = query.options(joinedload(models.Snapshot.snapshot_metadata))
    return query.all()
@require_context
@main_context_manager.reader
def snapshot_get_all_for_group_snapshot(context, group_snapshot_id):
    """Return all non-deleted snapshots that belong to a group snapshot."""
    query = model_query(
        context, models.Snapshot, read_deleted='no', project_only=True
    ).filter_by(group_snapshot_id=group_snapshot_id)
    query = query.options(joinedload(models.Snapshot.volume))
    query = query.options(joinedload(models.Snapshot.snapshot_metadata))
    return query.all()
@require_context
@main_context_manager.reader
def snapshot_get_all_by_project(
    context,
    project_id,
    filters=None,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    offset=None,
):
    """Retrieves all snapshots in a project.

    If no sorting parameters are specified then returned snapshots are
    sorted first by the 'created_at' key and then by the 'id' key in
    descending order.

    :param context: context to query under
    :param project_id: project for all snapshots being retrieved
    :param filters: dictionary of filters; will do exact matching on values
    :param marker: the last item of the previous page, used to determine the
        next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
        paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
        paired with corresponding item in sort_keys
    :returns: list of matching snapshots
    """
    non_model_keys = ('host', 'cluster_name', 'availability_zone')
    if filters and not is_valid_model_filters(
        models.Snapshot, filters, exclude_list=non_model_keys
    ):
        return []

    authorize_project_context(context, project_id)

    # Add project_id to filters without mutating the caller's dict.
    filters = dict(filters) if filters else {}
    filters['project_id'] = project_id

    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.Snapshot,
    )
    # A missing query means no snapshot could possibly match the filters.
    if not query:
        return []
    query = query.options(joinedload(models.Snapshot.snapshot_metadata))
    return query.all()
@require_context
def _snapshot_data_get_for_project(
    context,
    project_id,
    volume_type_id=None,
    host=None,
    skip_internal=True,
):
    """Return (snapshot count, total size) for a project.

    When skip_internal is True, temporary snapshots (use_quota false) are
    excluded from the totals.
    """
    authorize_project_context(context, project_id)
    query = model_query(
        context,
        func.count(models.Snapshot.id),
        func.sum(models.Snapshot.volume_size),
        read_deleted="no",
    )
    if skip_internal:
        query = query.filter(models.Snapshot.use_quota)

    # Volume type and host both live on the volume row, so the join is
    # only needed when either of those filters is requested.
    if volume_type_id or host:
        query = query.join(models.Snapshot.volume)
        if volume_type_id:
            query = query.filter(
                models.Volume.volume_type_id == volume_type_id
            )
        if host:
            query = query.filter(_filter_host(models.Volume.host, host))

    row = query.filter(models.Snapshot.project_id == project_id).first()
    # NOTE(vish): convert None to 0
    return (row[0] or 0, row[1] or 0)
@require_context
@main_context_manager.reader
def snapshot_data_get_for_project(
    context, project_id, volume_type_id=None, host=None
):
    """Return (snapshot count, total size) over ALL of a project's snapshots.

    This method doesn't support filtering temporary resources (use_quota
    field) and defaults to returning all snapshots because all callers
    (quota sync methods and os-host API extension) require all the
    snapshots.
    """
    return _snapshot_data_get_for_project(
        context,
        project_id,
        volume_type_id,
        host=host,
        skip_internal=False,
    )
@require_context
@main_context_manager.reader
def snapshot_get_all_active_by_window(
    context, begin, end=None, project_id=None
):
    """Return snapshots that were active during window."""
    query = model_query(context, models.Snapshot, read_deleted="yes")
    # "Active" means not deleted yet, or deleted after the window began.
    not_deleted_before_begin = or_(
        models.Snapshot.deleted_at == None,  # noqa
        models.Snapshot.deleted_at > begin,
    )
    query = query.filter(not_deleted_before_begin)
    query = query.options(joinedload(models.Snapshot.volume))
    query = query.options(joinedload(models.Snapshot.snapshot_metadata))
    if end:
        query = query.filter(models.Snapshot.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)

    return query.all()
@handle_db_data_error
@require_context
@main_context_manager.writer
def snapshot_update(context, snapshot_id, values):
    """Apply ``values`` to a snapshot row; raise if it does not exist."""
    updated = (
        model_query(context, models.Snapshot, project_only=True)
        .filter_by(id=snapshot_id)
        .update(values)
    )
    if not updated:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
@require_context
@main_context_manager.reader
def get_snapshot_summary(context, project_only, filters=None):
    """Retrieves all snapshots summary.

    :param context: context to query under
    :param project_only: limit summary to snapshots
    :param filters: dictionary of filters; values that are in lists, tuples,
        or sets cause an 'IN' operation, while exact matching is used for
        other values, see _process_snaps_filters function for more
        information
    :returns: snapshots summary
    """
    if not (project_only or is_admin_context(context)):
        raise exception.AdminRequired()

    query = model_query(
        context,
        func.count(models.Snapshot.id),
        func.sum(models.Snapshot.volume_size),
        read_deleted="no",
    )
    if project_only:
        query = query.filter_by(project_id=context.project_id)
    if filters:
        query = _process_snaps_filters(query, filters)
        # Filters that cannot match anything make the query None.
        if query is None:
            return []

    row = query.first()
    return row[0] or 0, row[1] or 0
####################
def _snapshot_metadata_get_query(context, snapshot_id):
    """Query for the non-deleted metadata rows of one snapshot."""
    query = model_query(context, models.SnapshotMetadata, read_deleted="no")
    return query.filter_by(snapshot_id=snapshot_id)
@require_context
def _snapshot_metadata_get(context, snapshot_id):
    """Return the snapshot's metadata as a {key: value} dict."""
    rows = _snapshot_metadata_get_query(context, snapshot_id).all()
    return {row['key']: row['value'] for row in rows}
@require_context
@require_snapshot_exists
@main_context_manager.reader
def snapshot_metadata_get(context, snapshot_id):
    """Return the metadata of the snapshot as a {key: value} dict."""
    metadata = _snapshot_metadata_get(context, snapshot_id)
    return metadata
@require_context
@require_snapshot_exists
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def snapshot_metadata_delete(context, snapshot_id, key):
    """Soft-delete one metadata item from the given snapshot."""
    query = _snapshot_metadata_get_query(context, snapshot_id).filter_by(
        key=key
    )
    entity = query.column_descriptions[0]['entity']
    soft_delete_values = {
        'deleted': True,
        'deleted_at': timeutils.utcnow(),
        # Reassign updated_at to itself so the bulk update does not bump it.
        'updated_at': entity.updated_at,
    }
    query.update(soft_delete_values)
@require_context
def _snapshot_metadata_get_item(context, snapshot_id, key):
    """Return one snapshot metadata row or raise SnapshotMetadataNotFound."""
    query = _snapshot_metadata_get_query(context, snapshot_id)
    row = query.filter_by(key=key).first()
    if not row:
        raise exception.SnapshotMetadataNotFound(
            metadata_key=key, snapshot_id=snapshot_id
        )
    return row
@require_context
@require_snapshot_exists
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
    """Update (and optionally prune) a snapshot's metadata.

    :param metadata: dict of key/value pairs to set
    :param delete: when True, soft-delete existing keys that are missing
        from ``metadata``
    :returns: the resulting metadata as a {key: value} dict
    """
    # Set existing metadata to deleted if delete argument is True
    # (only the keys matter here, so don't unpack unused values).
    if delete:
        original_metadata = _snapshot_metadata_get(context, snapshot_id)
        for meta_key in original_metadata:
            if meta_key not in metadata:
                meta_ref = _snapshot_metadata_get_item(
                    context,
                    snapshot_id,
                    meta_key,
                )
                meta_ref.update(
                    {
                        'deleted': True,
                        'deleted_at': timeutils.utcnow(),
                    }
                )
                meta_ref.save(context.session)

    # Now update all existing items with new values, or create new meta
    # objects
    for meta_key, meta_value in metadata.items():
        # update the value whether it exists or not
        item = {"value": meta_value}
        try:
            meta_ref = _snapshot_metadata_get_item(
                context, snapshot_id, meta_key
            )
        except exception.SnapshotMetadataNotFound:
            meta_ref = models.SnapshotMetadata()
            item.update({"key": meta_key, "snapshot_id": snapshot_id})

        meta_ref.update(item)
        meta_ref.save(context.session)

    return _snapshot_metadata_get(context, snapshot_id)
###################
@handle_db_data_error
@require_admin_context
@main_context_manager.writer
def volume_type_create(context, values, projects=None):
    """Create a new volume type.

    In order to pass in extra specs, the values dict should contain a
    'extra_specs' key/value pair:

    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    :param projects: optional list of project ids granted access to the
        (private) type
    :raises exception.VolumeTypeExists: if the name or id is already taken
    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    projects = projects or []
    orm_projects = []
    # Duplicate-name check: the lookup raising "not found" is the success
    # path; a successful lookup means the name is already taken.
    try:
        _volume_type_get_by_name(context, values['name'])
        raise exception.VolumeTypeExists(id=values['name'])
    except exception.VolumeTypeNotFoundByName:
        pass
    # Same duplicate check for an explicitly supplied id.
    try:
        _volume_type_get(context, values['id'])
        raise exception.VolumeTypeExists(id=values['id'])
    except exception.VolumeTypeNotFound:
        pass
    try:
        values['extra_specs'] = _metadata_refs(
            values.get('extra_specs'),
            models.VolumeTypeExtraSpecs,
        )
        volume_type_ref = models.VolumeType()
        volume_type_ref.update(values)
        context.session.add(volume_type_ref)
    except Exception as e:
        # Re-wrap any ORM failure as a DBError for callers.
        raise db_exc.DBError(e)
    # Record per-project access grants (for private types).
    for project in set(projects):
        access_ref = models.VolumeTypeProjects()
        access_ref.update(
            {
                "volume_type_id": volume_type_ref.id,
                "project_id": project,
            }
        )
        access_ref.save(context.session)
        orm_projects.append(access_ref)
    volume_type_ref.projects = orm_projects
    return volume_type_ref
@handle_db_data_error
@require_admin_context
@main_context_manager.writer
def group_type_create(context, values, projects=None):
    """Create a new group type.

    In order to pass in group specs, the values dict should contain a
    'group_specs' key/value pair:

    {'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    :param projects: optional list of project ids granted access to the
        (private) type
    :raises exception.GroupTypeExists: if the name or id is already taken
    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    projects = projects or []
    # Duplicate-name check: the lookup raising "not found" is the success
    # path; a successful lookup means the name is already taken.
    try:
        _group_type_get_by_name(context, values['name'])
        raise exception.GroupTypeExists(id=values['name'])
    except exception.GroupTypeNotFoundByName:
        pass
    # Same duplicate check for an explicitly supplied id.
    try:
        _group_type_get(context, values['id'])
        raise exception.GroupTypeExists(id=values['id'])
    except exception.GroupTypeNotFound:
        pass
    try:
        values['group_specs'] = _metadata_refs(
            values.get('group_specs'), models.GroupTypeSpecs
        )
        group_type_ref = models.GroupType()
        group_type_ref.update(values)
        context.session.add(group_type_ref)
    except Exception as e:
        # Re-wrap any ORM failure as a DBError for callers.
        raise db_exc.DBError(e)
    # Record per-project access grants (for private types).
    for project in set(projects):
        access_ref = models.GroupTypeProjects()
        access_ref.update(
            {
                "group_type_id": group_type_ref.id,
                "project_id": project,
            }
        )
        access_ref.save(context.session)
    return group_type_ref
def _volume_type_get_query(context, read_deleted='no', expected_fields=None):
    """Base volume type query; non-admins see only public/granted types."""
    query = model_query(
        context, models.VolumeType, read_deleted=read_deleted
    ).options(joinedload(models.VolumeType.extra_specs))

    for expected in expected_fields or []:
        query = query.options(joinedload(getattr(models.VolumeType, expected)))

    if not context.is_admin:
        # Non-admin users see public types plus any private type their
        # project has been granted access to.
        visibility = [
            models.VolumeType.is_public == true(),
            models.VolumeType.projects.any(project_id=context.project_id),
        ]
        query = query.filter(or_(*visibility))

    return query
def _group_type_get_query(context, read_deleted='no', expected_fields=None):
    """Base group type query; non-admins see only public/granted types."""
    expected_fields = expected_fields or []
    query = model_query(
        context, models.GroupType, read_deleted=read_deleted
    ).options(joinedload(models.GroupType.group_specs))

    if 'projects' in expected_fields:
        query = query.options(joinedload(models.GroupType.projects))

    if not context.is_admin:
        # Non-admin users see public types plus any private type their
        # project has been granted access to.
        visibility = [
            models.GroupType.is_public == true(),
            models.GroupType.projects.any(project_id=context.project_id),
        ]
        query = query.filter(or_(*visibility))

    return query
def _process_volume_types_filters(query, filters):
    """Apply volume-type list filters to ``query``.

    Mutates ``filters``: pops 'context', 'is_public' and 'extra_specs'
    as they are consumed; remaining keys become exact-match filters.

    :returns: the filtered query, or None when a filter key does not
        exist on the VolumeType model (callers treat None as "no match")
    """
    context = filters.pop('context', None)
    if 'is_public' in filters and filters['is_public'] is not None:
        the_filter = [models.VolumeType.is_public == filters['is_public']]
        # When listing public types, also include private types shared
        # with the caller's project.
        if filters['is_public'] and context.project_id is not None:
            projects_attr = getattr(models.VolumeType, 'projects')
            the_filter.extend(
                [projects_attr.any(project_id=context.project_id, deleted=0)]
            )
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])
    if 'is_public' in filters:
        del filters['is_public']
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.VolumeType, filters):
            return
        if filters.get('extra_specs') is not None:
            the_filter = []
            searchdict = filters.pop('extra_specs')
            extra_specs = getattr(models.VolumeType, 'extra_specs')
            for k, v in searchdict.items():
                # NOTE(tommylikehu): We will use 'LIKE' operator for
                # 'availability_zones' extra spec as it always store the
                # AZ list info within the format: "az1, az2,...."
                if k == 'RESKEY:availability_zones':
                    the_filter.extend(
                        [
                            extra_specs.any(
                                models.VolumeTypeExtraSpecs.value.like(
                                    u'%%%s%%' % v
                                ),
                                key=k,
                                deleted=False,
                            )
                        ]
                    )
                else:
                    the_filter.extend(
                        [extra_specs.any(key=k, value=v, deleted=False)]
                    )
            # All requested extra specs must match (AND semantics).
            if len(the_filter) > 1:
                query = query.filter(and_(*the_filter))
            else:
                query = query.filter(the_filter[0])
        query = query.filter_by(**filters)
    return query
def _process_group_types_filters(query, filters):
    """Apply group-type list filters to ``query``.

    Mutates ``filters``: pops 'context', 'is_public' and 'group_specs'
    as they are consumed; remaining keys become exact-match filters.

    :returns: the filtered query, or None when a filter key does not
        exist on the GroupType model (callers treat None as "no match")
    """
    context = filters.pop('context', None)
    if 'is_public' in filters and filters['is_public'] is not None:
        the_filter = [models.GroupType.is_public == filters['is_public']]
        # When listing public types, also include private types shared
        # with the caller's project.
        if filters['is_public'] and context.project_id is not None:
            projects_attr = getattr(models.GroupType, 'projects')
            the_filter.extend(
                [
                    projects_attr.any(
                        project_id=context.project_id, deleted=False
                    )
                ]
            )
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])
    if 'is_public' in filters:
        del filters['is_public']
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.GroupType, filters):
            return
        if filters.get('group_specs') is not None:
            the_filter = []
            searchdict = filters.pop('group_specs')
            group_specs = getattr(models.GroupType, 'group_specs')
            for k, v in searchdict.items():
                the_filter.extend(
                    [group_specs.any(key=k, value=v, deleted=False)]
                )
            # All requested group specs must match (AND semantics).
            if len(the_filter) > 1:
                query = query.filter(and_(*the_filter))
            else:
                query = query.filter(the_filter[0])
        query = query.filter_by(**filters)
    return query
@handle_db_data_error
@require_admin_context
def _type_update(context, type_id, values, is_group):
    """Update a volume type or a group type row.

    ``values`` must contain 'description', 'is_public' and 'name' keys;
    a None value means "leave unchanged" and is removed before the
    UPDATE is issued.

    :param is_group: True to operate on GroupType, False for VolumeType
    :raises: GroupTypeExists/VolumeTypeExists when renaming to a name
        owned by a different (undeleted) type
    :raises: GroupTypeNotFound/VolumeTypeNotFound when no row matched
    """
    if is_group:
        model = models.GroupType
        exists_exc = exception.GroupTypeExists
    else:
        model = models.VolumeType
        exists_exc = exception.VolumeTypeExists
    # No description change
    if values['description'] is None:
        del values['description']
    # No is_public change
    if values['is_public'] is None:
        del values['is_public']
    # No name change
    if values['name'] is None:
        del values['name']
    else:
        # Group type name is unique. If change to a name that belongs to
        # a different group_type, it should be prevented.
        conditions = and_(
            model.name == values['name'], model.id != type_id, ~model.deleted
        )
        query = context.session.query(sql.exists().where(conditions))
        if query.scalar():
            raise exists_exc(id=values['name'])
    query = model_query(context, model, project_only=True)
    result = query.filter_by(id=type_id).update(values)
    # update() returns the number of rows matched; zero means the type
    # does not exist (or is not visible to this project).
    if not result:
        if is_group:
            raise exception.GroupTypeNotFound(group_type_id=type_id)
        else:
            raise exception.VolumeTypeNotFound(volume_type_id=type_id)
@main_context_manager.writer
def volume_type_update(context, volume_type_id, values):
    """Update a volume type; None values in ``values`` mean "unchanged"."""
    _type_update(context, volume_type_id, values, is_group=False)
@main_context_manager.writer
def group_type_update(context, group_type_id, values):
    """Update a group type; None values in ``values`` mean "unchanged"."""
    _type_update(context, group_type_id, values, is_group=True)
@require_context
@main_context_manager.reader
def volume_type_get_all(
    context,
    inactive=False,
    filters=None,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    offset=None,
    list_result=False,
):
    """Returns a dict describing all volume_types with name as key.

    If no sort parameters are specified then the returned volume types are
    sorted first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists, tuples,
                    or sets cause an 'IN' operation, while exact matching
                    is used for other values, see
                    _process_volume_types_filters function for more
                    information
    :param list_result: For compatibility, if list_result = True, return a list
                        instead of dict.
    :returns: list/dict of matching volume types
    """
    # Add context for _process_volume_types_filters
    filters = filters or {}
    filters['context'] = context
    # Generate the query
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.VolumeType,
    )
    # No volume types would match, return empty dict or list
    if query is None:
        if list_result:
            return []
        return {}
    rows = query.all()
    if list_result:
        result = [
            _dict_with_extra_specs_if_authorized(context, row) for row in rows
        ]
        return result
    result = {
        row['name']: _dict_with_extra_specs_if_authorized(context, row)
        for row in rows
    }
    return result
@require_context
@main_context_manager.reader
def group_type_get_all(
    context,
    inactive=False,
    filters=None,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    offset=None,
    list_result=False,
):
    """Returns a dict describing all group_types with name as key.

    If no sort parameters are specified then the returned group types are
    sorted first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists, tuples,
        or sets cause an 'IN' operation, while exact matching is used for
        other values, see _process_group_types_filters function for more
        information
    :param list_result: For compatibility, if list_result = True, return a list
                        instead of dict.
    :returns: list/dict of matching group types
    """
    # Add context for _process_group_types_filters
    filters = filters or {}
    filters['context'] = context
    # Generate the query
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.GroupType,
    )
    # No group types would match, return empty dict or list
    if query is None:
        if list_result:
            return []
        return {}
    rows = query.all()
    if list_result:
        result = [
            _dict_with_group_specs_if_authorized(context, row) for row in rows
        ]
        return result
    result = {
        row['name']: _dict_with_group_specs_if_authorized(context, row)
        for row in rows
    }
    return result
def _volume_type_get_id_from_volume_type_query(context, id):
    """Query selecting only the id column of an undeleted volume type."""
    query = model_query(
        context,
        models.VolumeType.id,
        read_deleted="no",
        base_model=models.VolumeType,
    )
    return query.filter_by(id=id)
def _group_type_get_id_from_group_type_query(context, id):
    """Query selecting only the id column of an undeleted group type."""
    query = model_query(
        context,
        models.GroupType.id,
        read_deleted="no",
        base_model=models.GroupType,
    )
    return query.filter_by(id=id)
def _volume_type_get_id_from_volume_type(context, id):
    """Return the id of an existing volume type, or raise.

    :raises: VolumeTypeNotFound when no undeleted type has the given id
    """
    row = _volume_type_get_id_from_volume_type_query(context, id).first()
    if not row:
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return row[0]
def _group_type_get_id_from_group_type(context, id):
    """Return the id of an existing group type, or raise.

    :raises: GroupTypeNotFound when no undeleted type has the given id
    """
    row = _group_type_get_id_from_group_type_query(context, id).first()
    if not row:
        raise exception.GroupTypeNotFound(group_type_id=id)
    return row[0]
def _volume_type_get_db_object(
    context, id, inactive=False, expected_fields=None
):
    """Fetch the raw VolumeType DB object by id, or None if absent.

    ``inactive`` includes soft-deleted rows in the lookup.
    """
    read_deleted = "yes" if inactive else "no"
    query = _volume_type_get_query(context, read_deleted, expected_fields)
    return query.filter_by(id=id).first()
def _group_type_get_db_object(
    context,
    id,
    inactive=False,
    expected_fields=None,
):
    """Fetch the raw GroupType DB object by id, or None if absent.

    ``inactive`` includes soft-deleted rows in the lookup.
    """
    read_deleted = "yes" if inactive else "no"
    query = _group_type_get_query(context, read_deleted, expected_fields)
    return query.filter_by(id=id).first()
@require_context
def _volume_type_get(context, id, inactive=False, expected_fields=None):
    """Return a dict describing a volume type.

    ``expected_fields`` may request 'projects' and/or 'qos_specs' to be
    added to the result.

    :raises: VolumeTypeNotFound when the type does not exist
    """
    expected_fields = expected_fields or []
    db_obj = _volume_type_get_db_object(
        context,
        id,
        inactive,
        expected_fields,
    )
    if not db_obj:
        raise exception.VolumeTypeNotFound(volume_type_id=id)

    vtype = _dict_with_extra_specs_if_authorized(context, db_obj)
    if 'projects' in expected_fields:
        vtype['projects'] = [
            access['project_id'] for access in db_obj['projects']
        ]
    if 'qos_specs' in expected_fields:
        vtype['qos_specs'] = db_obj.qos_specs
    return vtype
@require_context
def _group_type_get(context, id, inactive=False, expected_fields=None):
    """Return a dict describing a group type.

    ``expected_fields`` may request 'projects' to be added to the result.

    :raises: GroupTypeNotFound when the type does not exist
    """
    expected_fields = expected_fields or []
    db_obj = _group_type_get_db_object(context, id, inactive, expected_fields)
    if not db_obj:
        raise exception.GroupTypeNotFound(group_type_id=id)

    gtype = _dict_with_group_specs_if_authorized(context, db_obj)
    if 'projects' in expected_fields:
        gtype['projects'] = [
            access['project_id'] for access in db_obj['projects']
        ]
    return gtype
@require_context
@main_context_manager.reader
def volume_type_get(context, id, inactive=False, expected_fields=None):
    """Get volume type by id.

    :param context: context to query under
    :param id: Volume type id to get.
    :param inactive: Consider inactive volume types when searching
    :param expected_fields: Return those additional fields.
                            Supported fields are: projects.
    :raises: VolumeTypeNotFound if the type does not exist
    :returns: volume type
    """
    return _volume_type_get(
        context, id, inactive=inactive, expected_fields=expected_fields
    )
@require_context
@main_context_manager.reader
def group_type_get(context, id, inactive=False, expected_fields=None):
    """Return a dict describing specific group_type.

    :param inactive: Consider inactive group types when searching
    :param expected_fields: Return those additional fields.
                            Supported fields are: projects.
    :raises: GroupTypeNotFound if the type does not exist
    """
    return _group_type_get(
        context, id, inactive=inactive, expected_fields=expected_fields
    )
def _volume_type_get_full(context, id):
    """Return dict for a specific volume_type with extra_specs and projects."""
    wanted = ('extra_specs', 'projects')
    return _volume_type_get(
        context, id, inactive=False, expected_fields=wanted
    )
def _group_type_get_full(context, id):
    """Return dict for a specific group_type with group_specs and projects."""
    wanted = ('group_specs', 'projects')
    return _group_type_get(
        context, id, inactive=False, expected_fields=wanted
    )
@require_context
def _volume_type_ref_get(context, id, inactive=False):
    """Return the VolumeType model object itself (not a dict) by id.

    :raises: VolumeTypeNotFound when no matching row exists
    """
    read_deleted = "yes" if inactive else "no"
    query = model_query(context, models.VolumeType, read_deleted=read_deleted)
    query = query.options(joinedload(models.VolumeType.extra_specs))
    ref = query.filter_by(id=id).first()
    if not ref:
        raise exception.VolumeTypeNotFound(volume_type_id=id)
    return ref
@require_context
def _group_type_ref_get(context, id, inactive=False):
    """Return the GroupType model object itself (not a dict) by id.

    :raises: GroupTypeNotFound when no matching row exists
    """
    read_deleted = "yes" if inactive else "no"
    query = model_query(context, models.GroupType, read_deleted=read_deleted)
    query = query.options(joinedload(models.GroupType.group_specs))
    ref = query.filter_by(id=id).first()
    if not ref:
        raise exception.GroupTypeNotFound(group_type_id=id)
    return ref
@require_context
def _volume_type_get_by_name(context, name):
    """Look up a volume type by name and return it as a dict.

    :raises: VolumeTypeNotFoundByName when no matching row exists
    """
    query = model_query(context, models.VolumeType)
    query = query.options(joinedload(models.VolumeType.extra_specs))
    row = query.filter_by(name=name).first()
    if not row:
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    return _dict_with_extra_specs_if_authorized(context, row)
@require_context
def _group_type_get_by_name(context, name):
    """Look up a group type by name and return it as a dict.

    :raises: GroupTypeNotFoundByName when no matching row exists
    """
    query = model_query(context, models.GroupType)
    query = query.options(joinedload(models.GroupType.group_specs))
    row = query.filter_by(name=name).first()
    if not row:
        raise exception.GroupTypeNotFoundByName(group_type_name=name)
    return _dict_with_group_specs_if_authorized(context, row)
@require_context
@main_context_manager.reader
def volume_type_get_by_name(context, name):
    """Return a dict describing specific volume_type.

    :raises: VolumeTypeNotFoundByName if no type has the given name
    """
    return _volume_type_get_by_name(context, name)
@require_context
@main_context_manager.reader
def group_type_get_by_name(context, name):
    """Return a dict describing specific group_type.

    :raises: GroupTypeNotFoundByName if no type has the given name
    """
    return _group_type_get_by_name(context, name)
@require_context
@main_context_manager.reader
def volume_types_get_by_name_or_id(context, volume_type_list):
    """Return a list of dicts for the requested volume types.

    Each entry of ``volume_type_list`` may be a type id or a type name.
    Identifiers that look like UUIDs are tried as ids first, then as
    names (a type's *name* may itself be uuid-like).

    :raises: VolumeTypeNotFound, VolumeTypeNotFoundByName
    """
    found = []
    for identifier in volume_type_list:
        if uuidutils.is_uuid_like(identifier):
            try:
                vol_type = _volume_type_get(context, identifier)
            except exception.VolumeTypeNotFound:
                # check again if we get this volume type by uuid-like name
                try:
                    vol_type = _volume_type_get_by_name(context, identifier)
                except exception.VolumeTypeNotFoundByName:
                    raise exception.VolumeTypeNotFound(
                        volume_type_id=identifier
                    )
        else:
            vol_type = _volume_type_get_by_name(context, identifier)
        found.append(vol_type)
    return found
@require_context
@main_context_manager.reader
def group_types_get_by_name_or_id(context, group_type_list):
    """Return a list of dicts for the requested group types.

    Each entry of ``group_type_list`` may be a group type id or name.
    Consistent with volume_types_get_by_name_or_id: when an identifier
    looks like a UUID but no group type has that id, fall back to a
    name lookup so that uuid-like *names* are still resolvable.

    :raises: GroupTypeNotFound, GroupTypeNotFoundByName
    """
    req_group_types = []
    for grp_t in group_type_list:
        if not uuidutils.is_uuid_like(grp_t):
            grp_type = _group_type_get_by_name(context, grp_t)
        else:
            try:
                grp_type = _group_type_get(context, grp_t)
            except exception.GroupTypeNotFound:
                # The name itself may simply be uuid-like; retry by name.
                try:
                    grp_type = _group_type_get_by_name(context, grp_t)
                except exception.GroupTypeNotFoundByName:
                    raise exception.GroupTypeNotFound(group_type_id=grp_t)
        req_group_types.append(grp_type)
    return req_group_types
@require_admin_context
@require_qos_specs_exists
@main_context_manager.reader
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
    """List volume types associated with the given qos specs.

    ``inactive`` includes soft-deleted volume types.
    """
    read_deleted = "yes" if inactive else "no"
    query = model_query(
        context, models.VolumeType, read_deleted=read_deleted
    )
    query = query.options(joinedload(models.VolumeType.extra_specs))
    query = query.options(joinedload(models.VolumeType.projects))
    return query.filter_by(qos_specs_id=qos_specs_id).all()
@require_admin_context
@main_context_manager.writer
def volume_type_qos_associate(context, type_id, qos_specs_id):
    """Associate a volume type with qos specs.

    :raises: VolumeTypeNotFound if the type does not exist
    """
    # Existence check; raises if the type is unknown.
    _volume_type_get(context, type_id)
    context.session.query(models.VolumeType).filter_by(id=type_id).update(
        {'qos_specs_id': qos_specs_id, 'updated_at': timeutils.utcnow()}
    )
@require_admin_context
@main_context_manager.writer
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
    """Disassociate volume type from qos specs.

    No-op (beyond the existence check) when the type is not currently
    associated with the given qos specs.

    :raises: VolumeTypeNotFound if the type does not exist
    """
    # Existence check; raises if the type is unknown.
    _volume_type_get(context, type_id)
    context.session.query(models.VolumeType).filter_by(id=type_id).filter_by(
        qos_specs_id=qos_specs_id
    ).update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()})
@require_admin_context
@main_context_manager.writer
def volume_type_qos_disassociate_all(context, qos_specs_id):
    """Disassociate all volume types associated with specified qos specs."""
    context.session.query(models.VolumeType).filter_by(
        qos_specs_id=qos_specs_id
    ).update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()})
@require_admin_context
@main_context_manager.reader
def volume_type_qos_specs_get(context, type_id):
    """Return all qos specs for given volume type.

    :param type_id: id of the volume type
    :raises: VolumeTypeNotFound if the type does not exist
    :returns: dict of the form::

        {
            'qos_specs':
            {
                'id': 'qos-specs-id',
                'name': 'qos_specs_name',
                'consumer': 'Consumer',
                'specs': {
                    'key1': 'value1',
                    'key2': 'value2',
                    'key3': 'value3'
                }
            }
        }
    """
    # Existence check; raises VolumeTypeNotFound if the type is unknown.
    # NOTE: the previous code passed context.session as a positional
    # argument here, which landed in the 'inactive' parameter and made
    # this check (incorrectly) consider soft-deleted volume types too.
    _volume_type_get(context, type_id)
    row = (
        context.session.query(models.VolumeType)
        .options(joinedload(models.VolumeType.qos_specs))
        .filter_by(id=type_id)
        .first()
    )
    # row.qos_specs is a list of QualityOfServiceSpecs ref
    specs = _dict_with_qos_specs(row.qos_specs)
    # A type without qos specs yields an empty list; report it as None.
    specs = specs[0] if specs else None
    return {'qos_specs': specs}
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_type_destroy(context, type_id):
    """Soft-delete a volume type and its extra specs/encryption/access.

    :raises: VolumeTypeDeletionError when this is the only volume type
    :raises: VolumeTypeNotFound when the type does not exist
    :raises: VolumeTypeInUse when volumes, generic groups or consistency
        groups still reference the type
    :returns: dict of the values written ('deleted', 'deleted_at')
    """
    utcnow = timeutils.utcnow()
    # Never delete the last remaining volume type.
    vol_types = volume_type_get_all(context)
    if len(vol_types) <= 1:
        raise exception.VolumeTypeDeletionError(volume_type_id=type_id)
    # Existence check; raises VolumeTypeNotFound if unknown.
    _volume_type_get(context, type_id)
    # Refuse deletion while any volume, group mapping or consistency
    # group still references this type.
    results = (
        model_query(context, models.Volume)
        .filter_by(volume_type_id=type_id)
        .all()
    )
    group_count = (
        model_query(
            context,
            models.GroupVolumeTypeMapping,
            read_deleted="no",
        )
        .filter_by(volume_type_id=type_id)
        .count()
    )
    cg_count = (
        model_query(
            context,
            models.ConsistencyGroup,
        )
        .filter(models.ConsistencyGroup.volume_type_id.contains(type_id))
        .count()
    )
    if results or group_count or cg_count:
        LOG.error('VolumeType %s deletion failed, VolumeType in use.', type_id)
        raise exception.VolumeTypeInUse(volume_type_id=type_id)
    query = model_query(context, models.VolumeType).filter_by(id=type_id)
    entity = query.column_descriptions[0]['entity']
    # NOTE(review): 'updated_at': entity.updated_at assigns the column
    # expression back to itself — presumably to keep updated_at
    # unchanged by this UPDATE; confirm before altering.
    updated_values = {
        'deleted': True,
        'deleted_at': utcnow,
        'updated_at': entity.updated_at,
    }
    query.update(updated_values)
    # Cascade the soft delete to the type's extra specs.
    query = model_query(
        context,
        models.VolumeTypeExtraSpecs,
    ).filter_by(volume_type_id=type_id)
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': utcnow,
            'updated_at': entity.updated_at,
        }
    )
    # Cascade the soft delete to the type's encryption settings.
    query = model_query(context, models.Encryption).filter_by(
        volume_type_id=type_id
    )
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': utcnow,
            'updated_at': entity.updated_at,
        }
    )
    # Remove project access entries for the type.
    model_query(
        context, models.VolumeTypeProjects, read_deleted="int_no"
    ).filter_by(volume_type_id=type_id).soft_delete(synchronize_session=False)
    # Callers only care about the deletion markers.
    del updated_values['updated_at']
    return updated_values
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def group_type_destroy(context, type_id):
    """Soft-delete a group type and its group specs.

    :raises: GroupTypeNotFound when the type does not exist
    :raises: GroupTypeInUse when groups still reference the type
    """
    # Existence check; raises GroupTypeNotFound if unknown.
    _group_type_get(context, type_id)
    # Refuse deletion while any group still uses this type.
    results = (
        model_query(context, models.Group)
        .filter_by(group_type_id=type_id)
        .all()
    )
    if results:
        LOG.error('GroupType %s deletion failed, GroupType in use.', type_id)
        raise exception.GroupTypeInUse(group_type_id=type_id)
    query = model_query(context, models.GroupType).filter_by(id=type_id)
    entity = query.column_descriptions[0]['entity']
    # NOTE(review): assigning entity.updated_at back to itself appears
    # intended to leave updated_at unchanged by this UPDATE — confirm.
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            'updated_at': entity.updated_at,
        }
    )
    # Cascade the soft delete to the type's group specs.
    query = model_query(context, models.GroupTypeSpecs).filter_by(
        group_type_id=type_id
    )
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            'updated_at': entity.updated_at,
        }
    )
@require_context
@main_context_manager.reader
def volume_get_all_active_by_window(context, begin, end=None, project_id=None):
    """Return volumes that were active during window.

    :param begin: datetime; volumes deleted before this time are excluded
    :param end: optional datetime; only volumes created before it count
    :param project_id: optional project id to restrict the result to
    :returns: list of Volume model objects with common relationships
        eagerly loaded (admin contexts also get volume_admin_metadata)
    """
    query = model_query(context, models.Volume, read_deleted="yes")
    # "Active during the window" = never deleted, or deleted after the
    # window began.  Use is_(None) rather than "== None" for the NULL
    # comparison (same SQL, idiomatic SQLAlchemy, no noqa needed).
    query = query.filter(
        or_(
            models.Volume.deleted_at.is_(None),
            models.Volume.deleted_at > begin,
        )
    )
    if end:
        query = query.filter(models.Volume.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    query = (
        query.options(joinedload(models.Volume.volume_metadata))
        .options(joinedload(models.Volume.volume_type))
        .options(joinedload(models.Volume.volume_attachment))
        .options(joinedload(models.Volume.consistencygroup))
        .options(joinedload(models.Volume.group))
    )
    if is_admin_context(context):
        query = query.options(joinedload(models.Volume.volume_admin_metadata))
    return query.all()
def _volume_type_access_query(context):
    """Query over volume-type/project access rows (soft-deleted excluded)."""
    return model_query(
        context,
        models.VolumeTypeProjects,
        read_deleted="int_no",
    )
def _group_type_access_query(context):
    """Query over group-type/project access rows (soft-deleted excluded)."""
    return model_query(
        context,
        models.GroupTypeProjects,
        read_deleted="no",
    )
@require_admin_context
@main_context_manager.reader
def volume_type_access_get_all(context, type_id):
    """List the project access entries of a volume type.

    :raises: VolumeTypeNotFound if the type does not exist
    """
    vt_id = _volume_type_get_id_from_volume_type(context, type_id)
    query = _volume_type_access_query(context)
    return query.filter_by(volume_type_id=vt_id).all()
@require_admin_context
@main_context_manager.reader
def group_type_access_get_all(context, type_id):
    """List the project access entries of a group type.

    :raises: GroupTypeNotFound if the type does not exist
    """
    gt_id = _group_type_get_id_from_group_type(context, type_id)
    query = _group_type_access_query(context)
    return query.filter_by(group_type_id=gt_id).all()
def _group_volume_type_mapping_query(context):
    """Query over group <-> volume-type mappings (soft-deleted excluded)."""
    return model_query(
        context,
        models.GroupVolumeTypeMapping,
        read_deleted="no",
    )
@require_admin_context
@main_context_manager.reader
def volume_type_get_all_by_group(context, group_id):
    """Return the volume types associated with a generic volume group."""
    # Generic volume group
    mappings = (
        _group_volume_type_mapping_query(context)
        .filter_by(group_id=group_id)
        .all()
    )
    type_ids = [m.volume_type_id for m in mappings]
    return (
        model_query(context, models.VolumeType, read_deleted='no')
        .filter(models.VolumeType.id.in_(type_ids))
        .options(joinedload(models.VolumeType.extra_specs))
        .options(joinedload(models.VolumeType.projects))
        .all()
    )
def _group_volume_type_mapping_get_all_by_group_volume_type(
    context, group_id, volume_type_id
):
    """Return the mapping rows linking a group to a specific volume type."""
    query = _group_volume_type_mapping_query(context)
    query = query.filter_by(group_id=group_id)
    query = query.filter_by(volume_type_id=volume_type_id)
    return query.all()
@require_admin_context
@main_context_manager.writer
def volume_type_access_add(context, type_id, project_id):
    """Add given tenant to the volume type access list.

    :raises: VolumeTypeNotFound if the type does not exist
    :raises: VolumeTypeAccessExists if the project already has access
    :returns: the created VolumeTypeProjects row
    """
    volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
    access_ref = models.VolumeTypeProjects()
    access_ref.update(
        {"volume_type_id": volume_type_id, "project_id": project_id}
    )
    try:
        access_ref.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.VolumeTypeAccessExists(
            volume_type_id=type_id, project_id=project_id
        )
    return access_ref
@require_admin_context
@main_context_manager.writer
def group_type_access_add(context, type_id, project_id):
    """Add given tenant to the group type access list.

    :raises: GroupTypeNotFound if the type does not exist
    :raises: GroupTypeAccessExists if the project already has access
    :returns: the created GroupTypeProjects row
    """
    group_type_id = _group_type_get_id_from_group_type(context, type_id)
    access_ref = models.GroupTypeProjects()
    access_ref.update(
        {"group_type_id": group_type_id, "project_id": project_id}
    )
    try:
        access_ref.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.GroupTypeAccessExists(
            group_type_id=type_id, project_id=project_id
        )
    return access_ref
@require_admin_context
@main_context_manager.writer
def volume_type_access_remove(context, type_id, project_id):
    """Remove given tenant from the volume type access list.

    :raises: VolumeTypeNotFound if the type does not exist
    :raises: VolumeTypeAccessNotFound if the project had no access entry
    """
    volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
    count = (
        _volume_type_access_query(context)
        .filter_by(volume_type_id=volume_type_id)
        .filter_by(project_id=project_id)
        .soft_delete(synchronize_session=False)
    )
    # soft_delete returns the number of rows affected.
    if count == 0:
        raise exception.VolumeTypeAccessNotFound(
            volume_type_id=type_id, project_id=project_id
        )
def _project_default_volume_type_get(context, project_id=None):
    """Get default volume type(s) for a project(s)

    If a project id is passed, it returns default type for that particular
    project else returns default volume types for all projects
    """
    query = model_query(context, models.DefaultVolumeTypes)
    if not project_id:
        return query.all()
    return query.filter_by(project_id=project_id).first()
@main_context_manager.reader
def project_default_volume_type_get(context, project_id=None):
    """Get default volume type(s) for a project(s)

    If a project id is passed, it returns default type for that particular
    project else returns default volume types for all projects
    """
    return _project_default_volume_type_get(context, project_id)
@main_context_manager.writer
def project_default_volume_type_set(context, volume_type_id, project_id):
    """Set default volume type for a project

    Updates the existing row when the project already has a default;
    otherwise inserts a new one.

    :returns: the DefaultVolumeTypes row that was created or updated
    """
    update_default = _project_default_volume_type_get(context, project_id)
    if update_default:
        LOG.info("Updating default type for project %s", project_id)
        update_default.volume_type_id = volume_type_id
        return update_default
    access_ref = models.DefaultVolumeTypes(
        volume_type_id=volume_type_id, project_id=project_id
    )
    access_ref.save(context.session)
    return access_ref
@main_context_manager.reader
def get_all_projects_with_default_type(context, volume_type_id):
    """Get all projects with volume_type_id as their default type"""
    query = model_query(context, models.DefaultVolumeTypes)
    return query.filter_by(volume_type_id=volume_type_id).all()
@main_context_manager.writer
def project_default_volume_type_unset(context, project_id):
    """Unset default volume type for a project (hard delete)"""
    # Note: a hard delete() — the row is removed, not soft-deleted.
    model_query(context, models.DefaultVolumeTypes).filter_by(
        project_id=project_id
    ).delete()
@require_admin_context
@main_context_manager.writer
def group_type_access_remove(context, type_id, project_id):
    """Remove given tenant from the group type access list.

    :raises: GroupTypeNotFound if the type does not exist
    :raises: GroupTypeAccessNotFound if the project had no access entry
    """
    group_type_id = _group_type_get_id_from_group_type(context, type_id)
    count = (
        _group_type_access_query(context)
        .filter_by(group_type_id=group_type_id)
        .filter_by(project_id=project_id)
        .soft_delete(synchronize_session=False)
    )
    # soft_delete returns the number of rows affected.
    if count == 0:
        raise exception.GroupTypeAccessNotFound(
            group_type_id=type_id, project_id=project_id
        )
####################
def _volume_type_extra_specs_query(context, volume_type_id):
    """Query over the undeleted extra-spec rows of a volume type."""
    query = model_query(
        context,
        models.VolumeTypeExtraSpecs,
        read_deleted="no",
    )
    return query.filter_by(volume_type_id=volume_type_id)
@require_context
@main_context_manager.reader
def volume_type_extra_specs_get(context, volume_type_id):
    """Return the extra specs of a volume type as a {key: value} dict."""
    rows = _volume_type_extra_specs_query(context, volume_type_id).all()
    return {row['key']: row['value'] for row in rows}
@require_context
@main_context_manager.writer
def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Soft-delete one extra spec of a volume type.

    :raises: VolumeTypeExtraSpecsNotFound if the key does not exist
    """
    # Existence check; raises if the key is unknown.
    _volume_type_extra_specs_get_item(context, volume_type_id, key)
    query = _volume_type_extra_specs_query(
        context,
        volume_type_id,
    ).filter_by(key=key)
    entity = query.column_descriptions[0]['entity']
    # NOTE(review): assigning entity.updated_at back to itself appears
    # intended to leave updated_at unchanged by this UPDATE — confirm.
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            'updated_at': entity.updated_at,
        },
    )
@require_context
def _volume_type_extra_specs_get_item(context, volume_type_id, key):
    """Return one extra-spec row of a volume type, or raise.

    :raises: VolumeTypeExtraSpecsNotFound when the key is absent
    """
    query = _volume_type_extra_specs_query(context, volume_type_id)
    spec = query.filter_by(key=key).first()
    if not spec:
        raise exception.VolumeTypeExtraSpecsNotFound(
            extra_specs_key=key,
            volume_type_id=volume_type_id,
        )
    return spec
@handle_db_data_error
@require_context
@main_context_manager.writer
def volume_type_extra_specs_update_or_create(
    context,
    volume_type_id,
    extra_specs,
):
    """Upsert extra specs for a volume type.

    For each key in ``extra_specs`` the existing row is updated, or a
    new one is created when no (undeleted) row exists.

    :returns: the ``extra_specs`` dict that was passed in
    """
    spec_ref = None
    for key, value in extra_specs.items():
        try:
            spec_ref = _volume_type_extra_specs_get_item(
                context,
                volume_type_id,
                key,
            )
        except exception.VolumeTypeExtraSpecsNotFound:
            # No existing row for this key: create one.
            spec_ref = models.VolumeTypeExtraSpecs()
        spec_ref.update(
            {
                "key": key,
                "value": value,
                "volume_type_id": volume_type_id,
                "deleted": False,
            },
        )
        spec_ref.save(context.session)
    return extra_specs
####################
def _group_type_specs_query(context, group_type_id):
    """Query over the undeleted group-spec rows of a group type."""
    query = model_query(
        context,
        models.GroupTypeSpecs,
        read_deleted="no",
    )
    return query.filter_by(group_type_id=group_type_id)
@require_context
@main_context_manager.reader
def group_type_specs_get(context, group_type_id):
    """Return the group specs of a group type as a {key: value} dict."""
    rows = _group_type_specs_query(context, group_type_id).all()
    return {row['key']: row['value'] for row in rows}
@require_context
@main_context_manager.writer
def group_type_specs_delete(context, group_type_id, key):
    """Soft-delete one group spec of a group type.

    :raises: GroupTypeSpecsNotFound if the key does not exist
    """
    # Existence check; raises if the key is unknown.
    _group_type_specs_get_item(context, group_type_id, key)
    query = _group_type_specs_query(context, group_type_id).filter_by(key=key)
    entity = query.column_descriptions[0]['entity']
    # NOTE(review): assigning entity.updated_at back to itself appears
    # intended to leave updated_at unchanged by this UPDATE — confirm.
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            'updated_at': entity.updated_at,
        },
    )
@require_context
def _group_type_specs_get_item(context, group_type_id, key):
    """Return one group-spec row of a group type, or raise.

    :raises: GroupTypeSpecsNotFound when the key is absent
    """
    query = _group_type_specs_query(context, group_type_id)
    spec = query.filter_by(key=key).first()
    if not spec:
        raise exception.GroupTypeSpecsNotFound(
            group_specs_key=key,
            group_type_id=group_type_id,
        )
    return spec
@handle_db_data_error
@require_context
@main_context_manager.writer
def group_type_specs_update_or_create(context, group_type_id, group_specs):
    """Upsert group specs for a group type.

    For each key in ``group_specs`` the existing row is updated, or a
    new one is created when no (undeleted) row exists.

    :returns: the ``group_specs`` dict that was passed in
    """
    spec_ref = None
    for key, value in group_specs.items():
        try:
            spec_ref = _group_type_specs_get_item(context, group_type_id, key)
        except exception.GroupTypeSpecsNotFound:
            # No existing row for this key: create one.
            spec_ref = models.GroupTypeSpecs()
        spec_ref.update(
            {
                "key": key,
                "value": value,
                "group_type_id": group_type_id,
                "deleted": False,
            },
        )
        spec_ref.save(context.session)
    return group_specs
####################
@require_admin_context
@main_context_manager.writer
def qos_specs_create(context, values):
    """Create a new QoS specs.

    :param values dictionary that contains specifications for QoS

    Expected format of the input parameter:

    .. code-block:: json

        {
            'name': 'Name',
            'consumer': 'front-end',
            'specs':
            {
                'total_iops_sec': 1000,
                'total_bytes_sec': 1024000
            }
        }

    :raises: QoSSpecsExists when the name is already in use
    :returns: dict with the 'id' and 'name' of the created specs
    """
    specs_id = str(uuid.uuid4())
    # Enforce name uniqueness: creation only proceeds if the name lookup
    # raises QoSSpecsNotFound.
    try:
        _qos_specs_get_all_by_name(context, values['name'])
        raise exception.QoSSpecsExists(specs_id=values['name'])
    except exception.QoSSpecsNotFound:
        pass
    try:
        # Insert a root entry for QoS specs
        specs_root = models.QualityOfServiceSpecs()
        root = {'id': specs_id}
        # 'QoS_Specs_Name' is an internal reserved key to store
        # the name of QoS specs
        root['key'] = 'QoS_Specs_Name'
        root['value'] = values['name']
        LOG.debug("DB qos_specs_create(): root %s", root)
        specs_root.update(root)
        specs_root.save(context.session)
        # Save 'consumer' value directly as it will not be in
        # values['specs'] and so we avoid modifying/copying passed in dict
        consumer = {
            'key': 'consumer',
            'value': values['consumer'],
            'specs_id': specs_id,
            'id': str(uuid.uuid4()),
        }
        cons_entry = models.QualityOfServiceSpecs()
        cons_entry.update(consumer)
        cons_entry.save(context.session)
        # Insert all specification entries for QoS specs
        for k, v in values.get('specs', {}).items():
            item = {'key': k, 'value': v, 'specs_id': specs_id}
            item['id'] = str(uuid.uuid4())
            spec_entry = models.QualityOfServiceSpecs()
            spec_entry.update(item)
            spec_entry.save(context.session)
    except db_exc.DBDataError:
        msg = _('Error writing field to database')
        LOG.exception(msg)
        raise exception.Invalid(msg)
    except Exception as e:
        raise db_exc.DBError(e)
    return {'id': specs_root.id, 'name': specs_root.value}
@require_admin_context
def _qos_specs_get_all_by_name(context, name, inactive=False):
    """Return all rows of the named qos specs, children join-loaded.

    :raises: QoSSpecsNotFound when no specs with that name exist
    """
    read_deleted = 'yes' if inactive else 'no'
    query = model_query(
        context,
        models.QualityOfServiceSpecs,
        read_deleted=read_deleted,
    )
    rows = (
        query.filter_by(key='QoS_Specs_Name')
        .filter_by(value=name)
        .options(joinedload(models.QualityOfServiceSpecs.specs))
        .all()
    )
    if not rows:
        raise exception.QoSSpecsNotFound(specs_id=name)
    return rows
@require_admin_context
def _qos_specs_get_all_ref(context, qos_specs_id, inactive=False):
    """Return all rows of a qos specs by id, children join-loaded.

    :raises: QoSSpecsNotFound when no specs with that id exist
    """
    read_deleted = 'yes' if inactive else 'no'
    query = model_query(
        context,
        models.QualityOfServiceSpecs,
        read_deleted=read_deleted,
    )
    rows = (
        query.filter_by(id=qos_specs_id)
        .options(joinedload(models.QualityOfServiceSpecs.specs))
        .all()
    )
    if not rows:
        raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
    return rows
def _dict_with_children_specs(specs):
"""Convert specs list to a dict."""
result = {}
update_time = None
for spec in specs:
# Skip deleted keys
if not spec['deleted']:
# Add update time to specs list, in order to get the keyword
# 'updated_at' in specs info when printing logs.
if not update_time and spec['updated_at']:
update_time = spec['updated_at']
elif update_time and spec['updated_at']:
if (update_time - spec['updated_at']).total_seconds() < 0:
update_time = spec['updated_at']
result.update({spec['key']: spec['value']})
if update_time:
result.update({'updated_at': update_time})
return result
def _dict_with_qos_specs(rows):
    """Convert qos specs query results to list.

    Qos specs query results are a list of quality_of_service_specs refs,
    some are root entry of a qos specs (key == 'QoS_Specs_Name') and the
    rest are children entry, a.k.a detailed specs for a qos specs. This
    function converts query results to a dict using spec name as key.
    """
    members = []
    for row in rows:
        if row['key'] != 'QoS_Specs_Name':
            continue
        # Keep create time so the keyword 'created_at' shows up in the
        # specs info when printing logs.
        member = {
            'name': row['value'],
            'id': row['id'],
            'created_at': row['created_at'],
        }
        if row.specs:
            children = _dict_with_children_specs(row.specs)
            member['consumer'] = children.pop('consumer')
            if children.get('updated_at'):
                member['updated_at'] = children.pop('updated_at')
            member['specs'] = children
        members.append(member)
    return members
@require_admin_context
@main_context_manager.reader
def qos_specs_get(context, qos_specs_id, inactive=False):
    """Return a dict describing the qos specs with the given id.

    :raises: QoSSpecsNotFound if the id does not exist.
    """
    rows = _qos_specs_get_all_ref(context, qos_specs_id, inactive)
    # The root entry is unique per id, so there is exactly one element.
    return _dict_with_qos_specs(rows)[0]
@require_admin_context
@main_context_manager.reader
def qos_specs_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Returns a list of all qos_specs.

    Results is like:
        [{
            'id': SPECS-UUID,
            'name': 'qos_spec-1',
            'consumer': 'back-end',
            'specs': {
                'key1': 'value1',
                'key2': 'value2',
                ...
            }
         },
         {
            'id': SPECS-UUID,
            'name': 'qos_spec-2',
            'consumer': 'front-end',
            'specs': {
                'key1': 'value1',
                'key2': 'value2',
                ...
            }
         },
        ]

    :param filters: exact-match filters applied to the query
    :param marker: last item of the previous page, used for pagination
    :param limit: maximum number of items to return
    """
    # Generate the query
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.QualityOfServiceSpecs,
    )
    # No Qos specs would match, return empty list
    if query is None:
        return []
    rows = query.all()
    return _dict_with_qos_specs(rows)
@require_admin_context
def _qos_specs_get_query(context):
    """Return a query for all root (non-deleted) qos specs entries.

    Root entries are identified by key == 'QoS_Specs_Name'; children are
    eagerly loaded through the ``specs`` relationship.
    """
    rows = (
        model_query(
            context,
            models.QualityOfServiceSpecs,
            read_deleted='no',
        )
        .options(joinedload(models.QualityOfServiceSpecs.specs))
        .filter_by(key='QoS_Specs_Name')
    )
    return rows
def _process_qos_specs_filters(query, filters):
    """Apply exact-match filters to a qos specs query.

    Returns None (signalling "no possible match") when any filter key is
    not a column of the model.
    """
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.QualityOfServiceSpecs, filters):
            return
        query = query.filter_by(**filters)
    return query
@require_admin_context
def _qos_specs_get(context, qos_spec_id):
    """Return the root entry of a qos specs by id.

    :raises: QoSSpecsNotFound if there is no non-deleted root entry.
    """
    result = (
        model_query(context, models.QualityOfServiceSpecs, read_deleted='no')
        .filter_by(id=qos_spec_id)
        .filter_by(key='QoS_Specs_Name')
        .first()
    )
    if not result:
        raise exception.QoSSpecsNotFound(specs_id=qos_spec_id)
    return result
@require_admin_context
@main_context_manager.reader
def qos_specs_get_by_name(context, name, inactive=False):
    """Return a dict describing the qos specs with the given name.

    :raises: QoSSpecsNotFound if the name does not exist.
    """
    rows = _qos_specs_get_all_by_name(context, name, inactive)
    return _dict_with_qos_specs(rows)[0]
@require_admin_context
@main_context_manager.reader
def qos_specs_associations_get(context, qos_specs_id):
    """Return all entities associated with specified qos specs.

    For now, the only entity that is possible to associate with
    a qos specs is volume type, so this is just a wrapper of
    volume_type_qos_associations_get(). But it's possible to
    extend qos specs association to other entities, such as volumes,
    sometime in future.

    :returns: whatever volume_type_qos_associations_get() returns.
    """
    return volume_type_qos_associations_get(context, qos_specs_id)
@require_admin_context
@main_context_manager.writer
def qos_specs_associate(context, qos_specs_id, type_id):
    """Associate a volume type with the specified qos specs."""
    return volume_type_qos_associate(context, type_id, qos_specs_id)
@require_admin_context
@main_context_manager.writer
def qos_specs_disassociate(context, qos_specs_id, type_id):
    """Disassociate a volume type from the specified qos specs."""
    return volume_type_qos_disassociate(context, qos_specs_id, type_id)
@require_admin_context
@main_context_manager.writer
def qos_specs_disassociate_all(context, qos_specs_id):
    """Disassociate all entities associated with specified qos specs.

    For now, the only entity that is possible to associate with
    a qos specs is volume type, so this is just a wrapper of
    volume_type_qos_disassociate_all(). But it's possible to
    extend qos specs association to other entities, such as volumes,
    sometime in future.

    :returns: whatever volume_type_qos_disassociate_all() returns.
    """
    return volume_type_qos_disassociate_all(context, qos_specs_id)
@require_admin_context
@main_context_manager.writer
def qos_specs_item_delete(context, qos_specs_id, key):
    """Soft-delete a single key from a qos specs."""
    query = (
        context.session.query(models.QualityOfServiceSpecs)
        .filter(models.QualityOfServiceSpecs.key == key)
        .filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id)
    )
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            # Assign the column to itself so updated_at keeps its current
            # value instead of being refreshed by the bulk UPDATE.
            'updated_at': entity.updated_at,
        }
    )
@require_admin_context
@main_context_manager.writer
def qos_specs_delete(context, qos_specs_id):
    """Soft-delete a qos specs root entry and all of its child specs.

    :raises: QoSSpecsNotFound if qos_specs_id does not exist.
    :returns: dict of the values written (without 'updated_at').
    """
    # Raises QoSSpecsNotFound when the id is unknown.
    _qos_specs_get_all_ref(context, qos_specs_id)
    # Match both the root row (id) and its children (specs_id).
    query = context.session.query(models.QualityOfServiceSpecs).filter(
        or_(
            models.QualityOfServiceSpecs.id == qos_specs_id,
            models.QualityOfServiceSpecs.specs_id == qos_specs_id,
        )
    )
    entity = query.column_descriptions[0]['entity']
    updated_values = {
        'deleted': True,
        'deleted_at': timeutils.utcnow(),
        # Column self-assignment keeps updated_at unchanged.
        'updated_at': entity.updated_at,
    }
    query.update(updated_values)
    del updated_values['updated_at']
    return updated_values
@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key):
    """Return the child spec row with the given key for a qos specs.

    :raises: QoSSpecsKeyNotFound if the key does not exist.
    """
    result = (
        model_query(context, models.QualityOfServiceSpecs)
        .filter(models.QualityOfServiceSpecs.key == key)
        .filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id)
        .first()
    )
    if not result:
        raise exception.QoSSpecsKeyNotFound(
            specs_key=key, specs_id=qos_specs_id
        )
    return result
@handle_db_data_error
@require_admin_context
@require_qos_specs_exists
@main_context_manager.writer
def qos_specs_update(context, qos_specs_id, values):
    """Make updates to an existing qos specs.

    Perform add, update or delete key/values to a qos specs.

    :param values: dict optionally containing a 'specs' sub-dict and a
        'consumer' key; each spec key is upserted individually.
    :returns: the dict of specs that were written.
    """
    specs = values.get('specs', {})
    if 'consumer' in values:
        # Massage consumer to the right place for DB and copy specs
        # before updating so we don't modify dict for caller
        specs = specs.copy()
        specs['consumer'] = values['consumer']
    for key in specs:
        try:
            spec_ref = _qos_specs_get_item(context, qos_specs_id, key)
        except exception.QoSSpecsKeyNotFound:
            # Key does not exist yet: create a brand-new row for it.
            spec_ref = models.QualityOfServiceSpecs()
        # Reuse the existing row id when updating; generate one for a new
        # row.  (Renamed from 'id' to avoid shadowing the builtin.)
        spec_id = spec_ref.get('id', None) or str(uuid.uuid4())
        value = {
            'id': spec_id,
            'key': key,
            'value': specs[key],
            'specs_id': qos_specs_id,
            # Resurrect the row if it was previously soft-deleted.
            'deleted': False,
        }
        LOG.debug('qos_specs_update() value: %s', value)
        spec_ref.update(value)
        spec_ref.save(context.session)
    return specs
####################
@require_context
def _volume_type_encryption_get(context, volume_type_id):
    """Return the Encryption row for a volume type, or None."""
    return (
        model_query(
            context,
            models.Encryption,
            read_deleted="no",
        )
        .filter_by(volume_type_id=volume_type_id)
        .first()
    )
@require_context
@main_context_manager.reader
def volume_type_encryption_get(context, volume_type_id):
    """Return the encryption specs for a volume type, or None."""
    return _volume_type_encryption_get(context, volume_type_id)
@require_admin_context
@main_context_manager.writer
def volume_type_encryption_delete(context, volume_type_id):
    """Soft-delete the encryption specs of a volume type.

    :raises: VolumeTypeEncryptionNotFound if no specs exist.
    """
    encryption = _volume_type_encryption_get(context, volume_type_id)
    if not encryption:
        raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id)
    encryption.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            # Keep the previous updated_at value.
            'updated_at': encryption.updated_at,
        },
    )
@handle_db_data_error
@require_admin_context
@main_context_manager.writer
def volume_type_encryption_create(context, volume_type_id, values):
    """Create encryption specs for a volume type.

    Missing 'volume_type_id' and 'encryption_id' values are filled in
    automatically.

    :returns: the new Encryption model instance.
    """
    encryption = models.Encryption()
    if 'volume_type_id' not in values:
        values['volume_type_id'] = volume_type_id
    if 'encryption_id' not in values:
        values['encryption_id'] = str(uuid.uuid4())
    encryption.update(values)
    context.session.add(encryption)
    return encryption
@handle_db_data_error
@require_admin_context
@main_context_manager.writer
def volume_type_encryption_update(context, volume_type_id, values):
    """Update the encryption specs of a volume type in place.

    :raises: VolumeTypeEncryptionNotFound if no row was updated.
    """
    query = model_query(context, models.Encryption)
    result = query.filter_by(volume_type_id=volume_type_id).update(values)
    if not result:
        raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id)
@main_context_manager.reader
def volume_type_encryption_volume_get(context, volume_type_id):
    """Return all volumes that use the given volume type.

    NOTE(review): unlike the sibling functions in this section this one
    has no @require_context decorator — confirm that is intentional.
    """
    volume_list = (
        _volume_get_query(context, project_only=False)
        .filter_by(volume_type_id=volume_type_id)
        .all()
    )
    return volume_list
@require_context
@main_context_manager.reader
def volume_encryption_metadata_get(context, volume_id):
    """Return the encryption metadata for a given volume.

    :returns: dict always containing 'encryption_key_id' and, when the
        volume's type has encryption specs, also 'control_location',
        'cipher', 'key_size' and 'provider'.
    """
    volume_ref = _volume_get(context, volume_id)
    encryption_ref = _volume_type_encryption_get(
        context,
        volume_ref['volume_type_id'],
    )
    values = {
        'encryption_key_id': volume_ref['encryption_key_id'],
    }
    if encryption_ref:
        for key in ['control_location', 'cipher', 'key_size', 'provider']:
            values[key] = encryption_ref[key]
    return values
####################
@require_context
def _volume_glance_metadata_get_all(context):
    """Return Glance metadata rows for all volumes.

    Non-admin contexts are restricted to volumes in their own project.
    """
    query = model_query(context, models.VolumeGlanceMetadata)
    if is_user_context(context):
        query = query.filter(
            models.Volume.id == models.VolumeGlanceMetadata.volume_id,
            models.Volume.project_id == context.project_id,
        )
    return query.all()
@require_context
@main_context_manager.reader
def volume_glance_metadata_get_all(context):
    """Return the Glance metadata for all volumes."""
    return _volume_glance_metadata_get_all(context)
@require_context
@main_context_manager.reader
def volume_glance_metadata_list_get(context, volume_id_list):
    """Return the glance metadata for a volume list.

    :param volume_id_list: iterable of volume ids to match.
    """
    query = model_query(context, models.VolumeGlanceMetadata)
    query = query.filter(
        models.VolumeGlanceMetadata.volume_id.in_(volume_id_list)
    )
    return query.all()
@require_context
@require_volume_exists
def _volume_glance_metadata_get(context, volume_id):
    """Return non-deleted Glance metadata rows for a volume.

    :raises: GlanceMetadataNotFound if the volume has no metadata.
    """
    rows = (
        model_query(context, models.VolumeGlanceMetadata)
        .filter_by(volume_id=volume_id)
        .filter_by(deleted=False)
        .all()
    )
    if not rows:
        raise exception.GlanceMetadataNotFound(id=volume_id)
    return rows
@require_context
@main_context_manager.reader
def volume_glance_metadata_get(context, volume_id):
    """Return the Glance metadata for the specified volume."""
    return _volume_glance_metadata_get(context, volume_id)
@require_context
@require_snapshot_exists
def _volume_snapshot_glance_metadata_get(context, snapshot_id):
    """Return non-deleted Glance metadata rows for a snapshot.

    :raises: GlanceMetadataNotFound if the snapshot has no metadata.
    """
    rows = (
        model_query(context, models.VolumeGlanceMetadata)
        .filter_by(snapshot_id=snapshot_id)
        .filter_by(deleted=False)
        .all()
    )
    if not rows:
        raise exception.GlanceMetadataNotFound(id=snapshot_id)
    return rows
@require_context
@main_context_manager.reader
def volume_snapshot_glance_metadata_get(context, snapshot_id):
    """Return the Glance metadata for the specified snapshot."""
    return _volume_snapshot_glance_metadata_get(context, snapshot_id)
@require_context
@require_volume_exists
@main_context_manager.writer
def volume_glance_metadata_create(context, volume_id, key, value):
    """Update the Glance metadata for a volume by adding a new key:value pair.

    This API does not support changing the value of a key once it has been
    created.

    :raises: GlanceMetadataExists if the key already exists with a
        different value.  Re-creating the same key:value pair is a no-op.
    """
    rows = (
        context.session.query(models.VolumeGlanceMetadata)
        .filter_by(volume_id=volume_id)
        .filter_by(key=key)
        .filter_by(deleted=False)
        .all()
    )
    if len(rows) > 0:
        vol_glance_metadata = rows[0]
        # Idempotent: writing the identical value again succeeds silently.
        if vol_glance_metadata.value == str(value):
            return
        raise exception.GlanceMetadataExists(key=key, volume_id=volume_id)
    vol_glance_metadata = models.VolumeGlanceMetadata()
    vol_glance_metadata.volume_id = volume_id
    vol_glance_metadata.key = key
    # Values are always stored as strings.
    vol_glance_metadata.value = str(value)
    context.session.add(vol_glance_metadata)
    return
@require_context
@require_volume_exists
@main_context_manager.writer
def volume_glance_metadata_bulk_create(context, volume_id, metadata):
    """Update the Glance metadata for a volume by adding new key:value pairs.

    This API does not support changing the value of a key once it has been
    created.

    :raises: GlanceMetadataExists if any key already exists with a
        different value; keys with an identical value are skipped.
    """
    for key, value in metadata.items():
        rows = (
            context.session.query(models.VolumeGlanceMetadata)
            .filter_by(volume_id=volume_id)
            .filter_by(key=key)
            .filter_by(deleted=False)
            .all()
        )
        if len(rows) > 0:
            vol_glance_metadata = rows[0]
            # Identical value: nothing to do for this key.
            if vol_glance_metadata.value == str(value):
                continue
            raise exception.GlanceMetadataExists(key=key, volume_id=volume_id)
        vol_glance_metadata = models.VolumeGlanceMetadata()
        vol_glance_metadata.volume_id = volume_id
        vol_glance_metadata.key = key
        vol_glance_metadata.value = str(value)
        context.session.add(vol_glance_metadata)
@require_context
@require_snapshot_exists
@main_context_manager.writer
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
    """Update the Glance metadata for a snapshot.

    This copies all of the key:value pairs from the originating volume, to
    ensure that a volume created from the snapshot will retain the
    original metadata.

    :raises: GlanceMetadataNotFound if the volume has no metadata.
    """
    metadata = _volume_glance_metadata_get(context, volume_id)
    for meta in metadata:
        vol_glance_metadata = models.VolumeGlanceMetadata()
        vol_glance_metadata.snapshot_id = snapshot_id
        vol_glance_metadata.key = meta['key']
        vol_glance_metadata.value = meta['value']
        vol_glance_metadata.save(context.session)
@require_context
@main_context_manager.writer
def volume_glance_metadata_copy_from_volume_to_volume(
    context,
    src_volume_id,
    volume_id,
):
    """Update the Glance metadata for a volume.

    This copies all of the key:value pairs from the originating volume,
    to ensure that a volume created from the volume (clone) will
    retain the original metadata.

    :raises: GlanceMetadataNotFound if the source volume has no metadata.
    """
    metadata = _volume_glance_metadata_get(context, src_volume_id)
    for meta in metadata:
        vol_glance_metadata = models.VolumeGlanceMetadata()
        vol_glance_metadata.volume_id = volume_id
        vol_glance_metadata.key = meta['key']
        vol_glance_metadata.value = meta['value']
        vol_glance_metadata.save(context.session)
@require_context
@require_volume_exists
@main_context_manager.writer
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
    """Update Glance metadata from a volume.

    Update the Glance metadata from a volume (created from a snapshot) by
    copying all of the key:value pairs from the originating snapshot.

    This is so that the Glance metadata from the original volume is retained.

    :raises: GlanceMetadataNotFound if the snapshot has no metadata.
    """
    metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id)
    for meta in metadata:
        vol_glance_metadata = models.VolumeGlanceMetadata()
        vol_glance_metadata.volume_id = volume_id
        vol_glance_metadata.key = meta['key']
        vol_glance_metadata.value = meta['value']
        vol_glance_metadata.save(context.session)
@require_context
@main_context_manager.writer
def volume_glance_metadata_delete_by_volume(context, volume_id):
    """Soft-delete all Glance metadata rows of a volume."""
    query = model_query(
        context,
        models.VolumeGlanceMetadata,
        read_deleted='no',
    ).filter_by(volume_id=volume_id)
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            # Column self-assignment keeps updated_at unchanged.
            'updated_at': entity.updated_at,
        },
    )
@require_context
@main_context_manager.writer
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
    """Soft-delete all Glance metadata rows of a snapshot."""
    query = model_query(
        context,
        models.VolumeGlanceMetadata,
        read_deleted='no',
    ).filter_by(snapshot_id=snapshot_id)
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            # Column self-assignment keeps updated_at unchanged.
            'updated_at': entity.updated_at,
        },
    )
###############################
@require_admin_context
def _backup_data_get_for_project(context, project_id, volume_type_id=None):
    """Return (backup count, total backup size) for a project.

    :param volume_type_id: optionally restrict to one volume type.
    """
    query = model_query(
        context,
        func.count(models.Backup.id),
        func.sum(models.Backup.size),
        read_deleted="no",
    ).filter_by(project_id=project_id)
    if volume_type_id:
        query = query.filter_by(volume_type_id=volume_type_id)
    result = query.first()
    # NOTE(vish): convert None to 0
    return result[0] or 0, result[1] or 0
@require_context
@main_context_manager.reader
def backup_get(context, backup_id, read_deleted=None, project_only=True):
    """Return a backup by id.

    :raises: BackupNotFound if the id does not exist.
    """
    return _backup_get(
        context,
        backup_id,
        read_deleted=read_deleted,
        project_only=project_only,
    )
def _backup_get(
    context,
    backup_id,
    read_deleted=None,
    project_only=True,
):
    """Return a backup row with its metadata eagerly loaded.

    :raises: BackupNotFound if the id does not exist.
    """
    result = (
        model_query(
            context,
            models.Backup,
            project_only=project_only,
            read_deleted=read_deleted,
        )
        .options(joinedload(models.Backup.backup_metadata))
        .filter_by(id=backup_id)
        .first()
    )
    if not result:
        raise exception.BackupNotFound(backup_id=backup_id)
    return result
def _backup_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Return backups matching the filters, with pagination and sorting.

    Returns an empty list if any filter key is invalid or if no backup
    can match.
    """
    if filters and not is_valid_model_filters(models.Backup, filters):
        return []
    # Generate the paginate query
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.Backup,
    )
    if query is None:
        return []
    return query.all()
def _backups_get_query(context, project_only=False, joined_load=True):
    """Build the base Backup query.

    :param joined_load: eagerly load backup_metadata when True.
    """
    query = model_query(context, models.Backup, project_only=project_only)
    if joined_load:
        query = query.options(joinedload(models.Backup.backup_metadata))
    return query
@apply_like_filters(model=models.Backup)
def _process_backups_filters(query, filters):
    """Apply filters to a Backup query.

    Supports the special 'metadata' filter (matched against the
    backup_metadata association) and list-valued filters (OR-matched).
    Returns None when any filter key is not a column of the model.
    """
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Backup, filters):
            return
        filters_dict = {}
        for key, value in filters.items():
            if key == 'metadata':
                # Each metadata k/v must exist on the related table.
                col_attr = getattr(models.Backup, 'backup_metadata')
                for k, v in value.items():
                    query = query.filter(col_attr.any(key=k, value=v))
            elif isinstance(value, (list, tuple, set, frozenset)):
                # A collection value means "match any of these".
                orm_field = getattr(models.Backup, key)
                query = query.filter(or_(orm_field == v for v in value))
            else:
                filters_dict[key] = value
        # Apply exact matches
        if filters_dict:
            query = query.filter_by(**filters_dict)
    return query
@require_admin_context
@main_context_manager.reader
def backup_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Return all backups matching the filters (admin only)."""
    return _backup_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@require_admin_context
@main_context_manager.reader
def backup_get_all_by_host(context, host):
    """Return all backups on the given host, with metadata loaded."""
    return (
        model_query(context, models.Backup)
        .options(joinedload(models.Backup.backup_metadata))
        .filter_by(host=host)
        .all()
    )
@require_context
@main_context_manager.reader
def backup_get_all_by_project(
    context,
    project_id,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Return all backups belonging to a project."""
    authorize_project_context(context, project_id)
    # Copy before mutating so the caller's filters dict is untouched.
    if not filters:
        filters = {}
    else:
        filters = filters.copy()
    filters['project_id'] = project_id
    return _backup_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@require_context
@main_context_manager.reader
def backup_get_all_by_volume(context, volume_id, vol_project_id, filters=None):
    """Return all backups of the given volume.

    :param vol_project_id: project owning the volume; the context is
        authorized against it.
    """
    authorize_project_context(context, vol_project_id)
    # Copy before mutating so the caller's filters dict is untouched.
    if not filters:
        filters = {}
    else:
        filters = filters.copy()
    filters['volume_id'] = volume_id
    return _backup_get_all(context, filters)
@require_context
@main_context_manager.reader
def backup_get_all_active_by_window(context, begin, end=None, project_id=None):
    """Return backups that were active during window.

    A backup is active during the window when it was not deleted before
    ``begin`` and (if ``end`` is given) was created before ``end``.
    """
    query = model_query(context, models.Backup, read_deleted="yes").options(
        joinedload(models.Backup.backup_metadata)
    )
    query = query.filter(
        or_(
            models.Backup.deleted_at == None,  # noqa
            models.Backup.deleted_at > begin,
        )
    )
    if end:
        query = query.filter(models.Backup.created_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()
@handle_db_data_error
@require_context
@main_context_manager.writer
def backup_create(context, values):
    """Create a backup from the given values.

    A missing 'id' is generated; 'metadata' is converted into
    BackupMetadata rows.

    :returns: the freshly re-read backup row.
    """
    values['backup_metadata'] = _metadata_refs(
        values.get('metadata'), models.BackupMetadata
    )
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    backup_ref = models.Backup()
    backup_ref.update(values)
    context.session.add(backup_ref)
    return _backup_get(context, values['id'])
@handle_db_data_error
@require_context
@main_context_manager.writer
def backup_update(context, backup_id, values):
    """Update a backup row in place.

    :raises: BackupNotFound if no row was updated.
    """
    if 'fail_reason' in values:
        # Truncate to fit the 255-character column; copy first so the
        # caller's dict is not mutated.
        values = values.copy()
        values['fail_reason'] = (values['fail_reason'] or '')[:255]
    query = model_query(context, models.Backup, read_deleted="yes")
    result = query.filter_by(id=backup_id).update(values)
    if not result:
        raise exception.BackupNotFound(backup_id=backup_id)
@require_admin_context
@main_context_manager.writer
def backup_destroy(context, backup_id):
    """Soft-delete a backup and its metadata rows.

    :returns: dict of the values written to the backup row
        (without 'updated_at').
    """
    utcnow = timeutils.utcnow()
    updated_values = {
        'status': fields.BackupStatus.DELETED,
        'deleted': True,
        'deleted_at': utcnow,
    }
    query = model_query(context, models.Backup).filter_by(id=backup_id)
    entity = query.column_descriptions[0]['entity']
    # Column self-assignment keeps updated_at unchanged.
    updated_values['updated_at'] = entity.updated_at
    query.update(updated_values)
    # Also soft-delete the associated metadata rows.
    query = model_query(
        context,
        models.BackupMetadata,
    ).filter_by(backup_id=backup_id)
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': utcnow,
            'updated_at': entity.updated_at,
        }
    )
    del updated_values['updated_at']
    return updated_values
def _backup_metadata_get_query(context, backup_id):
    """Return a query for the non-deleted metadata rows of a backup."""
    return model_query(
        context, models.BackupMetadata, read_deleted="no"
    ).filter_by(backup_id=backup_id)
@require_context
def _backup_metadata_get(context, backup_id):
    """Return a backup's metadata as a plain {key: value} dict."""
    rows = _backup_metadata_get_query(context, backup_id).all()
    result = {}
    for row in rows:
        result[row['key']] = row['value']
    return result
@require_context
@require_backup_exists
@main_context_manager.reader
def backup_metadata_get(context, backup_id):
    """Return the metadata dict of an existing backup."""
    return _backup_metadata_get(context, backup_id)
@require_context
def _backup_metadata_get_item(context, backup_id, key):
    """Return the metadata row with the given key for a backup.

    :raises: BackupMetadataNotFound if the key does not exist.
    """
    result = (
        _backup_metadata_get_query(context, backup_id)
        .filter_by(key=key)
        .first()
    )
    if not result:
        raise exception.BackupMetadataNotFound(
            metadata_key=key, backup_id=backup_id
        )
    return result
@require_context
@require_backup_exists
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def backup_metadata_update(context, backup_id, metadata, delete):
    """Upsert a backup's metadata and return the resulting dict.

    :param metadata: dict of key/value pairs to write.
    :param delete: when True, keys not present in ``metadata`` are
        soft-deleted first.
    """
    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = _backup_metadata_get(context, backup_id)
        for meta_key, meta_value in original_metadata.items():
            if meta_key not in metadata:
                meta_ref = _backup_metadata_get_item(
                    context, backup_id, meta_key
                )
                meta_ref.update(
                    {'deleted': True, 'deleted_at': timeutils.utcnow()}
                )
                meta_ref.save(context.session)
    meta_ref = None
    # Now update all existing items with new values, or create new meta
    # objects
    for meta_key, meta_value in metadata.items():
        # update the value whether it exists or not
        item = {"value": meta_value}
        try:
            meta_ref = _backup_metadata_get_item(context, backup_id, meta_key)
        except exception.BackupMetadataNotFound:
            meta_ref = models.BackupMetadata()
            item.update({"key": meta_key, "backup_id": backup_id})
        meta_ref.update(item)
        meta_ref.save(context.session)
    return backup_metadata_get(context, backup_id)
###############################
@require_context
def _transfer_get(context, transfer_id):
    """Return a transfer by id.

    Non-admin contexts only see transfers for volumes in their project.

    :raises: TransferNotFound if the id does not exist or is not visible.
    """
    query = model_query(
        context,
        models.Transfer,
    ).filter_by(id=transfer_id)
    if not is_admin_context(context):
        volume = models.Volume
        query = query.filter(
            models.Transfer.volume_id == volume.id,
            volume.project_id == context.project_id,
        )
    result = query.first()
    if not result:
        raise exception.TransferNotFound(transfer_id=transfer_id)
    return result
@require_context
@main_context_manager.reader
def transfer_get(context, transfer_id):
    """Return a transfer by id (see _transfer_get for visibility rules)."""
    return _transfer_get(context, transfer_id)
def _process_transfer_filters(query, filters):
    """Apply filters to a Transfer query.

    'project_id' is matched through the associated volume; the other
    keys are exact-matched on the Transfer model.  Returns None when a
    remaining filter key is not a column of the model.
    """
    if filters:
        # 'project_id' lives on the Volume, not on the Transfer.
        project_id = filters.pop('project_id', None)
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Transfer, filters):
            return
        if project_id:
            volume = models.Volume
            query = query.filter(
                volume.id == models.Transfer.volume_id,
                volume.project_id == project_id,
            )
        query = query.filter_by(**filters)
    return query
def _translate_transfers(transfers):
fields = (
'id',
'volume_id',
'display_name',
'created_at',
'deleted',
'no_snapshots',
'source_project_id',
'destination_project_id',
'accepted',
)
return [{k: transfer[k] for k in fields} for transfer in transfers]
def _transfer_get_all(
    context,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    filters=None,
    offset=None,
):
    """Return translated transfers matching the filters, paginated.

    Returns an empty list when no transfer can match.
    """
    # Generate the query
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.Transfer,
    )
    if query is None:
        return []
    return _translate_transfers(query.all())
@require_admin_context
@main_context_manager.reader
def transfer_get_all(
    context,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    filters=None,
    offset=None,
):
    """Return all transfers matching the filters (admin only)."""
    return _transfer_get_all(
        context,
        marker=marker,
        limit=limit,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        filters=filters,
        offset=offset,
    )
def _transfer_get_query(context, project_only=False):
    """Build the base Transfer query."""
    return model_query(context, models.Transfer, project_only=project_only)
@require_context
@main_context_manager.reader
def transfer_get_all_by_project(
    context,
    project_id,
    marker=None,
    limit=None,
    sort_keys=None,
    sort_dirs=None,
    filters=None,
    offset=None,
):
    """Return all transfers for volumes owned by a project."""
    authorize_project_context(context, project_id)
    # Copy before mutating so the caller's filters dict is untouched.
    filters = filters.copy() if filters else {}
    filters['project_id'] = project_id
    return _transfer_get_all(
        context,
        marker=marker,
        limit=limit,
        sort_keys=sort_keys,
        sort_dirs=sort_dirs,
        filters=filters,
        offset=offset,
    )
@require_context
@handle_db_data_error
@main_context_manager.writer
def transfer_create(context, values):
    """Create a volume transfer.

    Atomically moves the volume from 'available' to 'awaiting-transfer'
    before inserting the transfer record.

    :raises: InvalidVolume if the volume is not in 'available' status.
    :returns: the new Transfer model instance.
    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    transfer_id = values['id']
    volume_id = values['volume_id']
    expected = {'id': volume_id, 'status': 'available'}
    update = {'status': 'awaiting-transfer'}
    # Compare-and-swap on the volume status guards against races.
    if not _conditional_update(context, models.Volume, update, expected):
        msg = _(
            'Transfer %(transfer_id)s: Volume id %(volume_id)s '
            'expected in available state.'
        ) % {'transfer_id': transfer_id, 'volume_id': volume_id}
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)
    transfer = models.Transfer()
    transfer.update(values)
    context.session.add(transfer)
    return transfer
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def transfer_destroy(context, transfer_id):
    """Soft-delete a transfer and return the volume to 'available'.

    :returns: dict of the values written to the transfer row
        (without 'updated_at').
    """
    utcnow = timeutils.utcnow()
    volume_id = _transfer_get(context, transfer_id)['volume_id']
    expected = {'id': volume_id, 'status': 'awaiting-transfer'}
    update = {'status': 'available'}
    if not _conditional_update(context, models.Volume, update, expected):
        # If the volume state is not 'awaiting-transfer' don't change it,
        # but we can still mark the transfer record as deleted.
        msg = _(
            'Transfer %(transfer_id)s: Volume expected in '
            'awaiting-transfer state.'
        ) % {'transfer_id': transfer_id}
        LOG.error(msg)
    query = model_query(context, models.Transfer).filter_by(id=transfer_id)
    entity = query.column_descriptions[0]['entity']
    updated_values = {
        'deleted': True,
        'deleted_at': utcnow,
        # Column self-assignment keeps updated_at unchanged.
        'updated_at': entity.updated_at,
    }
    query.update(updated_values)
    del updated_values['updated_at']
    return updated_values
def _roll_back_transferred_volume_and_snapshots(
    context, volume_id, old_user_id, old_project_id, transffered_snapshots
):
    """Undo a partially accepted transfer.

    Returns the volume to 'awaiting-transfer' and restores the original
    owner on the volume and on every snapshot already transferred.

    NOTE(review): the parameter name 'transffered_snapshots' is
    misspelled; renaming it would change the keyword interface, so it is
    left as-is here.
    """
    expected = {'id': volume_id, 'status': 'available'}
    update = {
        'status': 'awaiting-transfer',
        'user_id': old_user_id,
        'project_id': old_project_id,
        'updated_at': timeutils.utcnow(),
    }
    if not _conditional_update(context, models.Volume, update, expected):
        # Volume changed state underneath us; give up on the rollback.
        LOG.warning(
            'Volume: %(volume_id)s is not in the expected available '
            'status. Rolling it back.',
            {'volume_id': volume_id},
        )
        return
    for snapshot_id in transffered_snapshots:
        LOG.info(
            'Beginning to roll back transferred snapshots: %s', snapshot_id
        )
        expected = {'id': snapshot_id, 'status': 'available'}
        update = {
            'user_id': old_user_id,
            'project_id': old_project_id,
            'updated_at': timeutils.utcnow(),
        }
        if not _conditional_update(context, models.Snapshot, update, expected):
            # Stop at the first snapshot that cannot be rolled back.
            LOG.warning(
                'Snapshot: %(snapshot_id)s is not in the expected '
                'available state. Rolling it back.',
                {'snapshot_id': snapshot_id},
            )
            return
@require_context
@main_context_manager.writer
def transfer_accept(
    context, transfer_id, user_id, project_id, no_snapshots=False
):
    """Accept a volume transfer.

    Moves the volume to the new owner, optionally re-owns its snapshots,
    and marks the transfer record as accepted and deleted.

    :param no_snapshots: when True, snapshots are left with the old owner.
    :raises: InvalidVolume if the volume is not in 'awaiting-transfer'
        state; InvalidSnapshot (after rolling back) if any snapshot is
        not in 'available' state.
    """
    volume_id = _transfer_get(context, transfer_id)['volume_id']
    expected = {'id': volume_id, 'status': 'awaiting-transfer'}
    update = {
        'status': 'available',
        'user_id': user_id,
        'project_id': project_id,
        'updated_at': timeutils.utcnow(),
    }
    if not _conditional_update(context, models.Volume, update, expected):
        msg = _(
            'Transfer %(transfer_id)s: Volume id %(volume_id)s '
            'expected in awaiting-transfer state.'
        ) % {'transfer_id': transfer_id, 'volume_id': volume_id}
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)
    # Update snapshots for transfer snapshots with volume.
    if not no_snapshots:
        snapshots = snapshot_get_all_for_volume(context, volume_id)
        # Track which snapshots were re-owned so we can roll them back.
        transferred_snapshots = []
        for snapshot in snapshots:
            LOG.info('Begin to transfer snapshot: %s', snapshot['id'])
            old_user_id = snapshot['user_id']
            old_project_id = snapshot['project_id']
            expected = {'id': snapshot['id'], 'status': 'available'}
            update = {
                'user_id': user_id,
                'project_id': project_id,
                'updated_at': timeutils.utcnow(),
            }
            if not _conditional_update(
                context, models.Snapshot, update, expected
            ):
                msg = _(
                    'Transfer %(transfer_id)s: Snapshot '
                    '%(snapshot_id)s is not in the expected '
                    'available state.'
                ) % {'transfer_id': transfer_id, 'snapshot_id': snapshot['id']}
                LOG.error(msg)
                _roll_back_transferred_volume_and_snapshots(
                    context,
                    volume_id,
                    old_user_id,
                    old_project_id,
                    transferred_snapshots,
                )
                raise exception.InvalidSnapshot(reason=msg)
            transferred_snapshots.append(snapshot['id'])
    # Mark the transfer record as consumed.
    query = context.session.query(models.Transfer).filter_by(id=transfer_id)
    entity = query.column_descriptions[0]['entity']
    query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            # Column self-assignment keeps updated_at unchanged.
            'updated_at': entity.updated_at,
            'destination_project_id': project_id,
            'accepted': True,
        }
    )
###############################
@require_admin_context
def _consistencygroup_data_get_for_project(context, project_id):
    """Return usage data for a project's consistency groups.

    :returns: a (0, count) tuple — the first element is a fixed
        placeholder; presumably it mirrors the (dedup, count) shape of
        sibling usage helpers — TODO confirm.
    """
    query = model_query(
        context,
        func.count(models.ConsistencyGroup.id),
        read_deleted="no",
    ).filter_by(project_id=project_id)
    result = query.first()
    return (0, result[0] or 0)
@require_context
def _consistencygroup_get(context, consistencygroup_id):
    """Return a consistency group by id (project-scoped).

    :raises: ConsistencyGroupNotFound if the id does not exist.
    """
    result = (
        model_query(
            context,
            models.ConsistencyGroup,
            project_only=True,
        )
        .filter_by(id=consistencygroup_id)
        .first()
    )
    if not result:
        raise exception.ConsistencyGroupNotFound(
            consistencygroup_id=consistencygroup_id
        )
    return result
@require_context
@main_context_manager.reader
def consistencygroup_get(context, consistencygroup_id):
    """Return a consistency group by id."""
    return _consistencygroup_get(context, consistencygroup_id)
def _consistencygroups_get_query(context, project_only=False):
    """Build the base ConsistencyGroup query."""
    return model_query(
        context,
        models.ConsistencyGroup,
        project_only=project_only,
    )
def _process_consistencygroups_filters(query, filters):
    """Apply exact-match filters to a ConsistencyGroup query.

    Returns None when any filter key is not a column of the model.
    """
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.ConsistencyGroup, filters):
            return
        query = query.filter_by(**filters)
    return query
def _consistencygroup_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Return consistency groups matching the filters, paginated.

    Returns an empty list if any filter key is invalid or if no group
    can match.
    """
    if filters and not is_valid_model_filters(
        models.ConsistencyGroup, filters
    ):
        return []
    # Generate the paginate query
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.ConsistencyGroup,
    )
    if query is None:
        return []
    return query.all()
@require_admin_context
@main_context_manager.reader
def consistencygroup_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieves all consistency groups.

    If no sort parameters are specified then the returned cgs are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching consistency groups
    """
    return _consistencygroup_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@require_context
@main_context_manager.reader
def consistencygroup_get_all_by_project(
    context,
    project_id,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieves all consistency groups in a project.

    If no sort parameters are specified then the returned cgs are sorted
    first by the 'created_at' key and then by the 'id' key in descending
    order.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching consistency groups
    """
    authorize_project_context(context, project_id)
    # Copy before mutating so the caller's filters dict is untouched.
    if not filters:
        filters = {}
    else:
        filters = filters.copy()
    filters['project_id'] = project_id
    return _consistencygroup_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@handle_db_data_error
@require_context
@main_context_manager.writer
def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None):
    """Create a consistency group, optionally copying fields from a source.

    When ``cg_snap_id`` or ``cg_id`` is given, the backend-related columns
    (volume_type_id, availability_zone, host, cluster_name) are copied from
    the source CG through an INSERT ... FROM SELECT, so the insert only
    happens when the source row actually exists.
    :param context: context of the request
    :param values: column values for the new consistency group
    :param cg_snap_id: id of a CG snapshot whose source CG supplies fields
    :param cg_id: id of a CG that supplies fields directly
    :returns: the newly created ConsistencyGroup model
    :raises ConsistencyGroupNotFound: if cg_id is given but does not exist
    :raises CgSnapshotNotFound: if cg_snap_id is given but does not exist
    """
    cg_model = models.ConsistencyGroup
    # Copy so the caller's dict is not modified when keys are popped below.
    values = values.copy()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    if cg_snap_id:
        # Source is the CG that the given CG snapshot was taken from.
        conditions = [
            cg_model.id == models.CGSnapshot.consistencygroup_id,
            models.CGSnapshot.id == cg_snap_id,
        ]
    elif cg_id:
        conditions = [cg_model.id == cg_id]
    else:
        conditions = None
    if conditions:
        # We don't want duplicated field values
        names = ['volume_type_id', 'availability_zone', 'host', 'cluster_name']
        for name in names:
            values.pop(name, None)
        # SELECT the copied columns from the source row plus the literal
        # values for the new row; the INSERT only produces a row when the
        # source matching 'conditions' exists (atomic existence check).
        fields = [getattr(cg_model, name) for name in names]
        fields.extend(bindparam(k, v) for k, v in values.items())
        sel = context.session.query(*fields).filter(*conditions)
        names.extend(values.keys())
        insert_stmt = cg_model.__table__.insert().from_select(names, sel)
        result = context.session.execute(insert_stmt)
        # If we couldn't insert the row because of the conditions raise
        # the right exception
        if not result.rowcount:
            if cg_id:
                raise exception.ConsistencyGroupNotFound(
                    consistencygroup_id=cg_id
                )
            raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_snap_id)
    else:
        # No source given: create the row directly from the provided values.
        consistencygroup = cg_model()
        consistencygroup.update(values)
        context.session.add(consistencygroup)
    return _consistencygroup_get(context, values['id'])
@handle_db_data_error
@require_context
@main_context_manager.writer
def consistencygroup_update(context, consistencygroup_id, values):
    """Apply *values* to a consistency group, raising if it is missing."""
    updated = (
        model_query(context, models.ConsistencyGroup, project_only=True)
        .filter_by(id=consistencygroup_id)
        .update(values)
    )
    if not updated:
        raise exception.ConsistencyGroupNotFound(
            consistencygroup_id=consistencygroup_id
        )
@require_admin_context
@main_context_manager.writer
def consistencygroup_destroy(context, consistencygroup_id):
    """Soft-delete a consistency group and return the values applied."""
    now = timeutils.utcnow()
    query = model_query(context, models.ConsistencyGroup).filter_by(
        id=consistencygroup_id
    )
    entity = query.column_descriptions[0]['entity']
    values = {
        'status': fields.ConsistencyGroupStatus.DELETED,
        'deleted': True,
        'deleted_at': now,
        'updated_at': entity.updated_at,
    }
    query.update(values)
    # 'updated_at' holds a SQL expression, not a value; don't report it back.
    return {key: val for key, val in values.items() if key != 'updated_at'}
def cg_has_cgsnapshot_filter():
    """Return a filter that checks if a CG has CG Snapshots."""
    cg_matches = (
        models.CGSnapshot.consistencygroup_id == models.ConsistencyGroup.id
    )
    not_deleted = ~models.CGSnapshot.deleted
    return sql.exists().where(and_(cg_matches, not_deleted))
def cg_has_volumes_filter(attached_or_with_snapshots=False):
    """Return a filter to check if a CG has volumes.

    When attached_or_with_snapshots parameter is given a True value only
    attached volumes or those with snapshots will be considered.
    """
    has_volumes = sql.exists().where(
        and_(
            models.Volume.consistencygroup_id == models.ConsistencyGroup.id,
            ~models.Volume.deleted,
        )
    )
    if not attached_or_with_snapshots:
        return has_volumes
    # Restrict further: the volume must be attached or have a live snapshot.
    has_snapshot = sql.exists().where(
        and_(
            models.Volume.id == models.Snapshot.volume_id,
            ~models.Snapshot.deleted,
        )
    )
    return has_volumes.where(
        or_(models.Volume.attach_status == 'attached', has_snapshot)
    )
def cg_creating_from_src(cg_id=None, cgsnapshot_id=None):
    """Return a filter to check if a CG is being used as creation source.

    Returned filter is meant to be used in the Conditional Update mechanism
    and checks if provided CG ID or CG Snapshot ID is currently being used
    to create another CG.

    This filter will not include CGs that have used the ID but have already
    finished their creation (status is no longer creating).

    Filter uses a subquery that allows it to be used on updates to the
    consistencygroups table.

    :param cg_id: CG id to look for as a creation source
    :param cgsnapshot_id: CG snapshot id to look for as a creation source
    :raises ProgrammingError: when neither parameter is provided
    """
    # NOTE(geguileo): As explained in devref api_conditional_updates we use a
    # subquery to trick MySQL into using the same table in the update and the
    # where clause.
    subq = (
        sql.select(models.ConsistencyGroup)
        .where(
            and_(
                ~models.ConsistencyGroup.deleted,
                models.ConsistencyGroup.status == 'creating',
            )
        )
        .alias('cg2')
    )
    if cg_id:
        match_id = subq.c.source_cgid == cg_id
    elif cgsnapshot_id:
        match_id = subq.c.cgsnapshot_id == cgsnapshot_id
    else:
        msg = _(
            'cg_creating_from_src must be called with cg_id or '
            'cgsnapshot_id parameter.'
        )
        raise exception.ProgrammingError(reason=msg)
    # Pass the subquery directly instead of wrapping it in a list: the
    # legacy list form was deprecated in SQLAlchemy 1.4 and removed in 2.0,
    # and group_creating_from_src below already uses the direct form.
    return sql.exists(subq).where(match_id)
@require_admin_context
@main_context_manager.writer
def consistencygroup_include_in_cluster(
    context, cluster, partial_rename=True, **filters
):
    """Include all consistency groups matching the filters into a cluster.

    Delegates to the shared _include_in_cluster helper with the
    ConsistencyGroup model.
    """
    return _include_in_cluster(
        context, cluster, models.ConsistencyGroup, partial_rename, filters
    )
###############################
@require_admin_context
def _group_data_get_for_project(context, project_id):
    """Return (0, count) of non-deleted groups owned by *project_id*."""
    row = (
        model_query(
            context,
            func.count(models.Group.id),
            read_deleted="no",
        )
        .filter_by(project_id=project_id)
        .first()
    )
    return (0, row[0] or 0)
@require_context
def _group_get(context, group_id):
    """Return the Group row with *group_id* or raise GroupNotFound."""
    query = model_query(context, models.Group, project_only=True)
    group = query.filter_by(id=group_id).first()
    if group is None:
        raise exception.GroupNotFound(group_id=group_id)
    return group
@require_context
@main_context_manager.reader
def group_get(context, group_id):
    """Fetch a group by id inside a reader transaction."""
    return _group_get(context, group_id)
def _groups_get_query(context, project_only=False):
    """Base query for Group rows, optionally scoped to the project."""
    return model_query(context, models.Group, project_only=project_only)
def _group_snapshot_get_query(context, project_only=False):
    """Base query for GroupSnapshot rows, optionally project-scoped."""
    return model_query(
        context, models.GroupSnapshot, project_only=project_only
    )
@apply_like_filters(model=models.Group)
def _process_groups_filters(query, filters):
    """Apply *filters* to a Group query; None signals an invalid filter."""
    if not filters:
        return query
    # NOTE(xyang): backend_match_level needs to be handled before
    # is_valid_model_filters is called as it is not a column name
    # in the db.
    match_level = filters.pop('backend_match_level', 'backend')
    # host is a valid filter. Filter the query by host and
    # backend_match_level first.
    host = filters.pop('host', None)
    if host:
        query = query.filter(
            _filter_host(models.Group.host, host, match_level=match_level)
        )
    # Every remaining key must name a column on the model.
    if not is_valid_model_filters(models.Group, filters):
        return None
    return query.filter_by(**filters)
@apply_like_filters(model=models.GroupSnapshot)
def _process_group_snapshot_filters(query, filters):
    """Apply *filters* to a GroupSnapshot query; None when keys invalid."""
    if not filters:
        return query
    # Every filter key must name a column on the model.
    if not is_valid_model_filters(models.GroupSnapshot, filters):
        return None
    return query.filter_by(**filters)
def _group_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Run the paginated Group query; [] when filters are invalid."""
    # Filter validation is performed by _process_groups_filters when
    # _generate_paginate_query runs, so it is not repeated here.
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.Group,
    )
    if query is None:
        return []
    return query.all()
@require_admin_context
@main_context_manager.reader
def group_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieve every group.

    Without explicit sort parameters the results are ordered first by
    'created_at' and then by 'id', both descending.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param offset: number of items to skip over
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching groups
    """
    return _group_get_all(
        context,
        filters,
        marker,
        limit,
        offset,
        sort_keys,
        sort_dirs,
    )
@require_context
@main_context_manager.reader
def group_get_all_by_project(
    context,
    project_id,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieve the groups belonging to a project.

    Without explicit sort parameters the results are ordered first by
    'created_at' and then by 'id', both descending.

    :param context: context to query under
    :param project_id: project whose groups are wanted
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param offset: number of items to skip over
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching groups
    """
    authorize_project_context(context, project_id)
    # Work on a copy so the caller's filters dict is never mutated.
    effective_filters = dict(filters) if filters else {}
    effective_filters['project_id'] = project_id
    return _group_get_all(
        context,
        effective_filters,
        marker,
        limit,
        offset,
        sort_keys,
        sort_dirs,
    )
@handle_db_data_error
@require_context
@main_context_manager.writer
def group_create(
    context, values, group_snapshot_id=None, source_group_id=None
):
    """Create a group, optionally copying fields from a source group.

    When ``group_snapshot_id`` or ``source_group_id`` is given, the
    backend-related columns (group_type_id, availability_zone, host,
    cluster_name) are copied from the source group through an
    INSERT ... FROM SELECT, so the insert only happens when the source row
    actually exists.
    :param context: context of the request
    :param values: column values for the new group; may contain a
        'volume_type_ids' list that is stored via GroupVolumeTypeMapping
    :param group_snapshot_id: id of a group snapshot whose source group
        supplies the copied fields
    :param source_group_id: id of a group that supplies the copied fields
    :returns: the newly created Group model
    :raises GroupNotFound: if source_group_id is given but does not exist
    :raises GroupSnapshotNotFound: if group_snapshot_id is given but does
        not exist
    """
    group_model = models.Group
    # Copy so the caller's dict is not modified when keys are popped below.
    values = values.copy()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    if group_snapshot_id:
        # Source is the group that the given group snapshot was taken from.
        conditions = [
            group_model.id == models.GroupSnapshot.group_id,
            models.GroupSnapshot.id == group_snapshot_id,
        ]
    elif source_group_id:
        conditions = [group_model.id == source_group_id]
    else:
        conditions = None
    if conditions:
        # We don't want duplicated field values
        values.pop('group_type_id', None)
        values.pop('availability_zone', None)
        values.pop('host', None)
        values.pop('cluster_name', None)
        # NOTE(xyang): Save volume_type_ids to update later.
        volume_type_ids = values.pop('volume_type_ids', [])
        # SELECT the copied columns from the source row plus the literal
        # values for the new row; the INSERT only produces a row when the
        # source matching 'conditions' exists (atomic existence check).
        sel = context.session.query(
            group_model.group_type_id,
            group_model.availability_zone,
            group_model.host,
            group_model.cluster_name,
            *(bindparam(k, v) for k, v in values.items()),
        ).filter(*conditions)
        names = ['group_type_id', 'availability_zone', 'host', 'cluster_name']
        names.extend(values.keys())
        insert_stmt = group_model.__table__.insert().from_select(names, sel)
        result = context.session.execute(insert_stmt)
        # If we couldn't insert the row because of the conditions raise
        # the right exception
        if not result.rowcount:
            if source_group_id:
                raise exception.GroupNotFound(group_id=source_group_id)
            raise exception.GroupSnapshotNotFound(
                group_snapshot_id=group_snapshot_id
            )
        # Record the volume type associations for the new group.
        for item in volume_type_ids:
            mapping = models.GroupVolumeTypeMapping()
            mapping['volume_type_id'] = item
            mapping['group_id'] = values['id']
            context.session.add(mapping)
    else:
        # No source given: create the row directly from the provided values.
        for item in values.get('volume_type_ids') or []:
            mapping = models.GroupVolumeTypeMapping()
            mapping['volume_type_id'] = item
            mapping['group_id'] = values['id']
            context.session.add(mapping)
        group = group_model()
        group.update(values)
        context.session.add(group)
    return _group_get(context, values['id'])
@handle_db_data_error
@require_context
@main_context_manager.writer
def group_volume_type_mapping_create(context, group_id, volume_type_id):
    """Add group volume_type mapping entry."""
    # Both referenced rows must exist; these raise NotFound otherwise.
    _group_get(context, group_id)
    _volume_type_get_id_from_volume_type(context, volume_type_id)
    if _group_volume_type_mapping_get_all_by_group_volume_type(
        context, group_id, volume_type_id
    ):
        raise exception.GroupVolumeTypeMappingExists(
            group_id=group_id, volume_type_id=volume_type_id
        )
    mapping = models.GroupVolumeTypeMapping()
    mapping.update({"group_id": group_id, "volume_type_id": volume_type_id})
    try:
        mapping.save(context.session)
    except db_exc.DBDuplicateEntry:
        # A concurrent request may have created the mapping in between.
        raise exception.GroupVolumeTypeMappingExists(
            group_id=group_id, volume_type_id=volume_type_id
        )
    return mapping
@handle_db_data_error
@require_context
@main_context_manager.writer
def group_update(context, group_id, values):
    """Apply *values* to a group, raising GroupNotFound when absent."""
    updated = (
        model_query(context, models.Group, project_only=True)
        .filter_by(id=group_id)
        .update(values)
    )
    if not updated:
        raise exception.GroupNotFound(group_id=group_id)
@require_admin_context
@main_context_manager.writer
def group_destroy(context, group_id):
    """Soft-delete a group and its volume-type mapping rows."""
    group_query = model_query(context, models.Group).filter_by(id=group_id)
    group_entity = group_query.column_descriptions[0]['entity']
    group_query.update(
        {
            'status': fields.GroupStatus.DELETED,
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            'updated_at': group_entity.updated_at,
        }
    )
    # Also soft-delete the group's volume-type mapping entries.
    mapping_query = context.session.query(
        models.GroupVolumeTypeMapping,
    ).filter_by(group_id=group_id)
    mapping_entity = mapping_query.column_descriptions[0]['entity']
    mapping_query.update(
        {
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
            'updated_at': mapping_entity.updated_at,
        }
    )
def group_has_group_snapshot_filter():
    """Return a filter that checks if a group has group snapshots."""
    linked = models.GroupSnapshot.group_id == models.Group.id
    not_deleted = ~models.GroupSnapshot.deleted
    return sql.exists().where(and_(linked, not_deleted))
def group_has_volumes_filter(attached_or_with_snapshots=False):
    """Return a filter to check if a group has volumes.

    With attached_or_with_snapshots set, only attached volumes or those
    with live snapshots are considered.
    """
    has_volumes = sql.exists().where(
        and_(models.Volume.group_id == models.Group.id, ~models.Volume.deleted)
    )
    if not attached_or_with_snapshots:
        return has_volumes
    has_snapshot = sql.exists().where(
        and_(
            models.Volume.id == models.Snapshot.volume_id,
            ~models.Snapshot.deleted,
        )
    )
    return has_volumes.where(
        or_(models.Volume.attach_status == 'attached', has_snapshot)
    )
def group_creating_from_src(group_id=None, group_snapshot_id=None):
    """Return a filter checking if a group is being used as creation source.

    The filter matches when the given group (or group snapshot) is currently
    referenced as the source by a group whose status is still 'creating'.
    """
    # NOTE(geguileo): As explained in devref api_conditional_updates we use a
    # subquery to trick MySQL into using the same table in the update and the
    # where clause.
    creating = (
        sql.select(models.Group)
        .where(and_(~models.Group.deleted, models.Group.status == 'creating'))
        .alias('group2')
    )
    if group_id:
        match_id = creating.c.source_group_id == group_id
    elif group_snapshot_id:
        match_id = creating.c.group_snapshot_id == group_snapshot_id
    else:
        msg = _(
            'group_creating_from_src must be called with group_id or '
            'group_snapshot_id parameter.'
        )
        raise exception.ProgrammingError(reason=msg)
    return sql.exists(creating).where(match_id)
@require_admin_context
@main_context_manager.writer
def group_include_in_cluster(context, cluster, partial_rename=True, **filters):
    """Include all generic groups matching the filters into a cluster."""
    return _include_in_cluster(
        context,
        cluster,
        models.Group,
        partial_rename,
        filters,
    )
###############################
@require_context
def _cgsnapshot_get(context, cgsnapshot_id):
    """Return the CGSnapshot row or raise CgSnapshotNotFound."""
    query = model_query(context, models.CGSnapshot, project_only=True)
    snapshot = query.filter_by(id=cgsnapshot_id).first()
    if snapshot is None:
        raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)
    return snapshot
@require_context
@main_context_manager.reader
def cgsnapshot_get(context, cgsnapshot_id):
    """Fetch a CG snapshot by id inside a reader transaction."""
    return _cgsnapshot_get(context, cgsnapshot_id)
def is_valid_model_filters(model, filters, exclude_list=None):
    """Return True if filter values exist on the model

    :param model: a Cinder model
    :param filters: dictionary of filters
    :param exclude_list: keys that are accepted without model validation
    """
    for raw_key in filters:
        if exclude_list and raw_key in exclude_list:
            continue
        if raw_key == 'metadata':
            # Metadata filters must be dictionaries of key/value pairs.
            if not isinstance(filters[raw_key], dict):
                LOG.debug("Metadata filter value is not valid dictionary")
                return False
            continue
        # A trailing '~' marks an inexact ("like") filter key; strip it
        # before checking for the attribute on the model.
        attr_name = raw_key.rstrip('~')
        if not hasattr(model, attr_name):
            LOG.debug("'%s' filter key is not valid.", attr_name)
            return False
    return True
def _cgsnapshot_get_all(context, project_id=None, group_id=None, filters=None):
    """Query CG snapshots, optionally narrowed by project, group, filters."""
    query = model_query(context, models.CGSnapshot)
    if filters:
        # Reject filters naming columns that do not exist on the model.
        if not is_valid_model_filters(models.CGSnapshot, filters):
            return []
        query = query.filter_by(**filters)
    for column, value in (
        ('project_id', project_id),
        ('consistencygroup_id', group_id),
    ):
        if value:
            query = query.filter_by(**{column: value})
    return query.all()
@require_admin_context
@main_context_manager.reader
def cgsnapshot_get_all(context, filters=None):
    """Return all CG snapshots, optionally narrowed by filters."""
    return _cgsnapshot_get_all(context, filters=filters)
@require_admin_context
@main_context_manager.reader
def cgsnapshot_get_all_by_group(context, group_id, filters=None):
    """Return all CG snapshots taken from the given consistency group."""
    return _cgsnapshot_get_all(context, group_id=group_id, filters=filters)
@require_context
@main_context_manager.reader
def cgsnapshot_get_all_by_project(context, project_id, filters=None):
    """Return all CG snapshots owned by the given project."""
    authorize_project_context(context, project_id)
    return _cgsnapshot_get_all(context, project_id=project_id, filters=filters)
@handle_db_data_error
@require_context
@main_context_manager.writer
def cgsnapshot_create(context, values):
    """Create a CG snapshot, validating the source CG atomically.

    When 'consistencygroup_id' is present in *values*, the row is inserted
    via INSERT ... FROM SELECT whose WHERE clause checks that the source CG
    exists, is not deleted, has at least one volume, and is not in
    'creating' or 'updating' state; if any check fails no row is inserted.
    :param context: context of the request
    :param values: column values for the new CG snapshot
    :returns: the newly created CGSnapshot model
    :raises InvalidConsistencyGroup: if the source CG is empty or busy
    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    cg_id = values.get('consistencygroup_id')
    model = models.CGSnapshot
    if cg_id:
        # There has to exist at least 1 volume in the CG and the CG cannot
        # be updating the composing volumes or being created.
        conditions = [
            sql.exists().where(
                and_(
                    ~models.Volume.deleted,
                    models.Volume.consistencygroup_id == cg_id,
                ),
            ),
            ~models.ConsistencyGroup.deleted,
            models.ConsistencyGroup.id == cg_id,
            ~models.ConsistencyGroup.status.in_(('creating', 'updating')),
        ]
        # NOTE(geguileo): We build a "fake" from_select clause instead of
        # using transaction isolation on the session because we would need
        # SERIALIZABLE level and that would have a considerable performance
        # penalty.
        binds = (bindparam(k, v) for k, v in values.items())
        sel = context.session.query(*binds).filter(*conditions)
        insert_stmt = model.__table__.insert().from_select(
            values.keys(),
            sel,
        )
        result = context.session.execute(insert_stmt)
        # If we couldn't insert the row because of the conditions raise
        # the right exception
        if not result.rowcount:
            msg = _(
                "Source CG cannot be empty or in 'creating' or "
                "'updating' state. No cgsnapshot will be created."
            )
            raise exception.InvalidConsistencyGroup(reason=msg)
    else:
        # No source CG: create the row directly from the provided values.
        cgsnapshot = model()
        cgsnapshot.update(values)
        context.session.add(cgsnapshot)
    return _cgsnapshot_get(context, values['id'])
@require_context
@handle_db_data_error
@main_context_manager.writer
def cgsnapshot_update(context, cgsnapshot_id, values):
    """Update a CG snapshot, raising CgSnapshotNotFound when absent."""
    updated = (
        model_query(context, models.CGSnapshot, project_only=True)
        .filter_by(id=cgsnapshot_id)
        .update(values)
    )
    if not updated:
        raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)
@require_admin_context
@main_context_manager.writer
def cgsnapshot_destroy(context, cgsnapshot_id):
    """Soft-delete a CG snapshot and return the values applied."""
    query = model_query(context, models.CGSnapshot).filter_by(id=cgsnapshot_id)
    entity = query.column_descriptions[0]['entity']
    values = {
        'status': 'deleted',
        'deleted': True,
        'deleted_at': timeutils.utcnow(),
        'updated_at': entity.updated_at,
    }
    query.update(values)
    # 'updated_at' holds a SQL expression, not a value; don't report it back.
    return {key: val for key, val in values.items() if key != 'updated_at'}
def cgsnapshot_creating_from_src():
    """Get a filter that checks if a CGSnapshot is being created from a CG."""
    linked = (
        models.CGSnapshot.consistencygroup_id == models.ConsistencyGroup.id
    )
    return sql.exists().where(
        and_(
            linked,
            ~models.CGSnapshot.deleted,
            models.CGSnapshot.status == 'creating',
        )
    )
###############################
@require_context
def _group_snapshot_get(context, group_snapshot_id):
    """Return the GroupSnapshot row or raise GroupSnapshotNotFound."""
    query = model_query(context, models.GroupSnapshot, project_only=True)
    snapshot = query.filter_by(id=group_snapshot_id).first()
    if snapshot is None:
        raise exception.GroupSnapshotNotFound(
            group_snapshot_id=group_snapshot_id
        )
    return snapshot
@require_context
@main_context_manager.reader
def group_snapshot_get(context, group_snapshot_id):
    """Fetch a group snapshot by id inside a reader transaction."""
    return _group_snapshot_get(context, group_snapshot_id)
def _group_snapshot_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Run the paginated GroupSnapshot query; [] when filters are invalid."""
    if filters and not is_valid_model_filters(models.GroupSnapshot, filters):
        return []
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.GroupSnapshot,
    )
    if query is None:
        return []
    return query.all()
@require_admin_context
@main_context_manager.reader
def group_snapshot_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Return all group snapshots matching the pagination criteria."""
    return _group_snapshot_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@require_admin_context
@main_context_manager.reader
def group_snapshot_get_all_by_group(
    context,
    group_id,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieve all group snapshots taken from the given group.

    :param context: context to query under
    :param group_id: group whose snapshots are wanted
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param offset: number of items to skip over
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching group snapshots
    """
    # Copy before adding the group_id constraint so the caller's dict is
    # never mutated (matches group_get_all_by_project's behavior).
    filters = dict(filters) if filters else {}
    if group_id:
        filters['group_id'] = group_id
    return _group_snapshot_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@require_context
@main_context_manager.reader
def group_snapshot_get_all_by_project(
    context,
    project_id,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Retrieve all group snapshots owned by the given project.

    :param context: context to query under
    :param project_id: project whose group snapshots are wanted
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param offset: number of items to skip over
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: Filters for the query in the form of key/value.
    :returns: list of matching group snapshots
    """
    authorize_project_context(context, project_id)
    # Copy before adding the project_id constraint so the caller's dict is
    # never mutated (matches group_get_all_by_project's behavior).
    filters = dict(filters) if filters else {}
    if project_id:
        filters['project_id'] = project_id
    return _group_snapshot_get_all(
        context, filters, marker, limit, offset, sort_keys, sort_dirs
    )
@handle_db_data_error
@require_context
@main_context_manager.writer
def group_snapshot_create(context, values):
    """Create a group snapshot, validating the source group atomically.

    When 'group_id' is present in *values*, the row is inserted via
    INSERT ... FROM SELECT whose WHERE clause checks that the source group
    exists, is not deleted, has at least one volume, and is not in
    'creating' or 'updating' state; if any check fails no row is inserted.
    :param context: context of the request
    :param values: column values for the new group snapshot
    :returns: the newly created GroupSnapshot model
    :raises InvalidGroup: if the source group is empty or busy
    """
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    group_id = values.get('group_id')
    model = models.GroupSnapshot
    if group_id:
        # There has to exist at least 1 volume in the group and the group
        # cannot be updating the composing volumes or being created.
        conditions = [
            sql.exists().where(
                and_(
                    ~models.Volume.deleted, models.Volume.group_id == group_id
                )
            ),
            ~models.Group.deleted,
            models.Group.id == group_id,
            ~models.Group.status.in_(('creating', 'updating')),
        ]
        # NOTE(geguileo): We build a "fake" from_select clause instead of
        # using transaction isolation on the session because we would need
        # SERIALIZABLE level and that would have a considerable performance
        # penalty.
        binds = (bindparam(k, v) for k, v in values.items())
        sel = context.session.query(*binds).filter(*conditions)
        insert_stmt = model.__table__.insert().from_select(values.keys(), sel)
        result = context.session.execute(insert_stmt)
        # If we couldn't insert the row because of the conditions raise
        # the right exception
        if not result.rowcount:
            msg = _(
                "Source group cannot be empty or in 'creating' or "
                "'updating' state. No group snapshot will be created."
            )
            raise exception.InvalidGroup(reason=msg)
    else:
        # No source group: create the row directly from the provided values.
        group_snapshot = model()
        group_snapshot.update(values)
        context.session.add(group_snapshot)
    return _group_snapshot_get(context, values['id'])
@require_context
@handle_db_data_error
@main_context_manager.writer
def group_snapshot_update(context, group_snapshot_id, values):
    """Update a group snapshot with the given values.

    :param context: context to query under
    :param group_snapshot_id: id of the group snapshot to update
    :param values: column/value pairs to apply
    :returns: the updated GroupSnapshot model
    :raises GroupSnapshotNotFound: if the id does not match a row
    """
    result = (
        model_query(context, models.GroupSnapshot, project_only=True)
        .filter_by(id=group_snapshot_id)
        .first()
    )
    if not result:
        # Pass the id as a kwarg so the exception formats its own message,
        # consistent with _group_snapshot_get and the rest of this module.
        raise exception.GroupSnapshotNotFound(
            group_snapshot_id=group_snapshot_id
        )
    result.update(values)
    result.save(context.session)
    return result
@require_admin_context
@main_context_manager.writer
def group_snapshot_destroy(context, group_snapshot_id):
    """Soft-delete a group snapshot and return the values applied."""
    query = model_query(context, models.GroupSnapshot).filter_by(
        id=group_snapshot_id
    )
    entity = query.column_descriptions[0]['entity']
    values = {
        'status': 'deleted',
        'deleted': True,
        'deleted_at': timeutils.utcnow(),
        'updated_at': entity.updated_at,
    }
    query.update(values)
    # 'updated_at' holds a SQL expression, not a value; don't report it back.
    return {key: val for key, val in values.items() if key != 'updated_at'}
def group_snapshot_creating_from_src():
    """Get a filter to check if a grp snapshot is being created from a grp."""
    linked = models.GroupSnapshot.group_id == models.Group.id
    return sql.exists().where(
        and_(
            linked,
            ~models.GroupSnapshot.deleted,
            models.GroupSnapshot.status == 'creating',
        )
    )
###############################
@require_admin_context
@main_context_manager.writer
def purge_deleted_rows(context, age_in_days):
    """Purge deleted rows older than age from cinder tables.

    Tables are discovered by reflecting the live schema and are walked in
    reverse dependency order so children are purged before their parents.
    :param context: admin context of the request
    :param age_in_days: rows soft-deleted longer ago than this are removed
    :raises InvalidParameterValue: if age_in_days is not an integer
    :raises DBReferenceError: if a foreign key still references a purged row
    """
    try:
        age_in_days = int(age_in_days)
    except ValueError:
        msg = _('Invalid value for age, %(age)s')
        LOG.exception(msg, {'age': age_in_days})
        raise exception.InvalidParameterValue(msg % {'age': age_in_days})
    engine = get_engine()
    metadata = MetaData()
    # Reflect the current schema so new tables are handled automatically.
    metadata.reflect(engine)
    deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
    for table in reversed(metadata.sorted_tables):
        # Only soft-deletable tables (those with a 'deleted' column) apply.
        if 'deleted' not in table.columns.keys():
            continue
        LOG.info(
            'Purging deleted rows older than age=%(age)d days '
            'from table=%(table)s',
            {'age': age_in_days, 'table': table},
        )
        try:
            # Delete child records first from quality_of_service_specs
            # table to avoid FK constraints
            if str(table) == 'quality_of_service_specs':
                context.session.query(models.QualityOfServiceSpecs).filter(
                    and_(
                        models.QualityOfServiceSpecs.specs_id.isnot(None),
                        models.QualityOfServiceSpecs.deleted.is_(True),
                        models.QualityOfServiceSpecs.deleted_at < deleted_age,
                    )
                ).delete()
            result = context.session.execute(
                table.delete().where(
                    and_(
                        table.columns.deleted.is_(True),
                        table.c.deleted_at < deleted_age,
                    )
                )
            )
        except db_exc.DBReferenceError as ex:
            # A row elsewhere still references a row we tried to purge.
            LOG.error(
                'DBError detected when purging from %(tablename)s: %(error)s.',
                {'tablename': table, 'error': ex},
            )
            raise
        rows_purged = result.rowcount
        if rows_purged != 0:
            LOG.info(
                'Deleted %(row)d rows from table=%(table)s',
                {'row': rows_purged, 'table': table},
            )
###############################
@require_admin_context
@main_context_manager.writer
def reset_active_backend(
    context,
    enable_replication,
    active_backend_id,
    backend_host,
):
    """Reset a failed-over backend host back to its default state.

    The cinder-volume service for *backend_host* must be disabled and
    frozen; its replication bookkeeping is then cleared.
    """
    service = objects.Service.get_by_host_and_topic(
        context,
        backend_host,
        'cinder-volume',
        disabled=True,
    )
    if not service.frozen:
        raise exception.ServiceUnavailable(
            'Service for host %(host)s must first be frozen.'
            % {'host': backend_host},
        )
    updates = {
        'disabled': False,
        'disabled_reason': '',
        'active_backend_id': None,
        'replication_status': 'enabled',
    }
    expected = {'frozen': True, 'disabled': True}
    if service.is_clustered:
        # Clustered services track replication state on the cluster object.
        service.cluster.conditional_update(updates, expected)
        service.cluster.reset_service_replication()
    else:
        service.conditional_update(updates, expected)
###############################
def _translate_messages(messages):
    """Convert a sequence of Message models to plain dicts."""
    return list(map(_translate_message, messages))
def _translate_message(message):
"""Translate the Message model to a dict."""
return {
'id': message['id'],
'project_id': message['project_id'],
'request_id': message['request_id'],
'resource_type': message['resource_type'],
'resource_uuid': message.get('resource_uuid'),
'event_id': message['event_id'],
'detail_id': message['detail_id'],
'action_id': message['action_id'],
'message_level': message['message_level'],
'created_at': message['created_at'],
'expires_at': message.get('expires_at'),
}
def _message_get(context, message_id):
    """Return the Message row or raise MessageNotFound."""
    message = (
        model_query(
            context,
            models.Message,
            read_deleted="no",
            project_only="yes",
        )
        .filter_by(id=message_id)
        .first()
    )
    if message is None:
        raise exception.MessageNotFound(message_id=message_id)
    return message
@require_context
@main_context_manager.reader
def message_get(context, message_id):
    """Fetch a message by id and return it as a plain dict."""
    return _translate_message(_message_get(context, message_id))
@require_context
@main_context_manager.reader
def message_get_all(
    context,
    filters=None,
    marker=None,
    limit=None,
    offset=None,
    sort_keys=None,
    sort_dirs=None,
):
    """Return user messages matching the given criteria as dicts.

    Without explicit sort parameters the results are ordered first by
    'created_at' and then by 'id', both descending.

    :param context: context to query under
    :param marker: the last item of the previous page, used to determine the
                   next page of results to return
    :param limit: maximum number of items to return
    :param offset: number of items to skip over
    :param sort_keys: list of attributes by which results should be sorted,
                      paired with corresponding item in sort_dirs
    :param sort_dirs: list of directions in which results should be sorted,
                      paired with corresponding item in sort_keys
    :param filters: dictionary of filters; values that are in lists, tuples,
        or sets cause an 'IN' operation, while exact matching is used for
        other values, see _process_messages_filters function for more
        information
    :returns: list of matching messages translated to plain dicts
    """
    query = _generate_paginate_query(
        context,
        marker,
        limit,
        sort_keys,
        sort_dirs,
        filters,
        offset,
        models.Message,
    )
    # _generate_paginate_query returns None when a filter key is invalid.
    if query is None:
        return []
    return _translate_messages(query.all())
@apply_like_filters(model=models.Message)
def _process_messages_filters(query, filters):
    """Apply message filters to *query*; None marks an invalid filter key."""
    if not filters:
        return query
    # Every filter key must name a column on the model.
    if not is_valid_model_filters(models.Message, filters):
        return None
    return query.filter_by(**filters)
def _messages_get_query(context, project_only=False):
    """Base query for Message rows, optionally scoped to the project."""
    return model_query(context, models.Message, project_only=project_only)
@require_context
@main_context_manager.writer
def message_create(context, values):
    """Persist a new user message, generating an id when absent."""
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())
    message = models.Message()
    message.update(values)
    context.session.add(message)
@require_admin_context
@main_context_manager.writer
def message_destroy(context, message_id):
    """Soft-delete a message and return the values applied."""
    query = model_query(context, models.Message).filter_by(id=message_id)
    entity = query.column_descriptions[0]['entity']
    values = {
        'deleted': True,
        'deleted_at': timeutils.utcnow(),
        'updated_at': entity.updated_at,
    }
    query.update(values)
    # 'updated_at' holds a SQL expression, not a value; don't report it back.
    return {key: val for key, val in values.items() if key != 'updated_at'}
@require_admin_context
@main_context_manager.writer
def cleanup_expired_messages(context):
    """Hard-delete messages whose expires_at is in the past.

    :returns: number of rows removed
    """
    # NOTE(tommylikehu): Directly delete the expired
    # messages here.
    cutoff = timeutils.utcnow()
    query = context.session.query(models.Message)
    return query.filter(models.Message.expires_at < cutoff).delete()
###############################
@require_context
@main_context_manager.writer
def driver_initiator_data_insert_by_key(
    context,
    initiator,
    namespace,
    key,
    value,
):
    """Store a (namespace, key, value) entry for an initiator.

    :raises DriverInitiatorDataExists: when the entry already exists
    """
    data = models.DriverInitiatorData()
    data.update(
        {
            'initiator': initiator,
            'namespace': namespace,
            'key': key,
            'value': value,
        }
    )
    try:
        data.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.DriverInitiatorDataExists(
            initiator=initiator,
            namespace=namespace,
            key=key,
        )
    return data
@require_context
@main_context_manager.reader
def driver_initiator_data_get(context, initiator, namespace):
    """Return all stored data rows for an initiator within a namespace."""
    query = context.session.query(models.DriverInitiatorData).filter_by(
        initiator=initiator,
        namespace=namespace,
    )
    return query.all()
###############################
@require_context
@main_context_manager.writer
def image_volume_cache_create(
    context,
    host,
    cluster_name,
    image_id,
    image_updated_at,
    volume_id,
    size,
):
    """Add an image-volume cache entry and return it."""
    entry = models.ImageVolumeCacheEntry()
    entry.update(
        {
            'host': host,
            'cluster_name': cluster_name,
            'image_id': image_id,
            'image_updated_at': image_updated_at,
            'volume_id': volume_id,
            'size': size,
        }
    )
    context.session.add(entry)
    return entry
@require_context
@main_context_manager.writer
def image_volume_cache_delete(context, volume_id):
    """Remove the cache entry associated with *volume_id*, if any."""
    query = context.session.query(models.ImageVolumeCacheEntry)
    query.filter_by(volume_id=volume_id).delete()
@require_context
@main_context_manager.writer
def image_volume_cache_get_and_update_last_used(context, image_id, **filters):
    """Fetch the most recently used cache entry for an image.

    When a matching entry exists its last_used timestamp is refreshed
    before it is returned; otherwise None is returned.
    """
    filters = _clean_filters(filters)
    query = (
        context.session.query(models.ImageVolumeCacheEntry)
        .filter_by(image_id=image_id)
        .filter_by(**filters)
        .order_by(desc(models.ImageVolumeCacheEntry.last_used))
    )
    entry = query.first()
    if entry is not None:
        entry.last_used = timeutils.utcnow()
        entry.save(context.session)
    return entry
@require_context
@main_context_manager.reader
def image_volume_cache_get_by_volume_id(context, volume_id):
    """Return the cache entry backed by *volume_id*, or None."""
    query = context.session.query(models.ImageVolumeCacheEntry)
    return query.filter_by(volume_id=volume_id).first()
@require_context
@main_context_manager.reader
def image_volume_cache_get_all(context, **filters):
    """Return all cache entries matching *filters*, most recently used first."""
    filters = _clean_filters(filters)
    query = (
        context.session.query(models.ImageVolumeCacheEntry)
        .filter_by(**filters)
        .order_by(desc(models.ImageVolumeCacheEntry.last_used))
    )
    return query.all()
@require_admin_context
@main_context_manager.writer
def image_volume_cache_include_in_cluster(
    context,
    cluster,
    partial_rename=True,
    **filters,
):
    """Include all volumes matching the filters into a cluster."""
    return _include_in_cluster(
        context,
        cluster,
        models.ImageVolumeCacheEntry,
        partial_rename,
        _clean_filters(filters),
    )
###################
def _worker_query(
    context,
    until=None,
    db_filters=None,
    ignore_sentinel=True,
    **filters,
):
    """Build a Worker query for the given criteria.

    :param until: only include workers whose updated_at is <= this value.
    :param db_filters: iterable of extra SQLAlchemy filter expressions that
                       are ANDed together.
    :param ignore_sentinel: skip the special SENTINEL row (see TODO below).
    :param filters: column=value equality filters on the workers table.
    :returns: a query object, or None when *filters* reference columns that
              do not exist on the Worker model.
    """
    # Remove all filters based on the workers table that are set to None
    filters = _clean_filters(filters)
    if filters and not is_valid_model_filters(models.Worker, filters):
        return None
    query = model_query(context, models.Worker)
    # TODO: Once we stop creating the SENTINEL entry in the database (which
    # was only needed to support MySQL 5.5), we can drop this. Probably in the
    # A release or later
    if ignore_sentinel:
        # We don't want to retrieve the workers sentinel
        query = query.filter(models.Worker.resource_type != 'SENTINEL')
    if until:
        # Copy so we never mutate the caller's db_filters list.
        db_filters = list(db_filters) if db_filters else []
        # Since we set updated_at at creation time we don't need to check
        # created_at field.
        db_filters.append(models.Worker.updated_at <= until)
    if db_filters:
        query = query.filter(and_(*db_filters))
    if filters:
        query = query.filter_by(**filters)
    return query
def _worker_set_updated_at_field(values):
    """Ensure *values* carries an 'updated_at' entry, defaulting to now.

    Values that are already strings are left untouched (and the key is not
    added), since they are assumed to be pre-formatted timestamps.
    """
    current = values.get('updated_at', timeutils.utcnow())
    if isinstance(current, str):
        return
    values['updated_at'] = current
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def worker_create(context, **values):
    """Create a worker entry from optional arguments.

    Raises WorkerExists when a worker for the same resource already exists.
    """
    _worker_set_updated_at_field(values)
    record = models.Worker(**values)
    try:
        record.save(context.session)
    except db_exc.DBDuplicateEntry:
        raise exception.WorkerExists(type=values.get('resource_type'),
                                     id=values.get('resource_id'))
    return record
@require_context
@main_context_manager.reader
def worker_get(context, **filters):
    """Get a worker or raise exception if it does not exist."""
    query = _worker_query(context, **filters)
    # A None query means the filters were invalid for the Worker model.
    if query is None:
        raise exception.WorkerNotFound(**filters)
    worker = query.first()
    if worker is None:
        raise exception.WorkerNotFound(**filters)
    return worker
@require_context
@main_context_manager.reader
def worker_get_all(context, until=None, db_filters=None, **filters):
    """Get all workers that match given criteria."""
    query = _worker_query(
        context, until=until, db_filters=db_filters, **filters)
    # Invalid filters yield no query; report that as an empty result.
    if query is None:
        return []
    return query.all()
def _orm_worker_update(worker, values):
if not worker:
return
for key, value in values.items():
setattr(worker, key, value)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def worker_update(context, id, filters=None, orm_worker=None, **values):
    """Update a worker with given values."""
    filters = filters or {}
    query = _worker_query(context, id=id, **filters)
    # If we want to update the orm_worker and we don't set the update_at field
    # we set it here instead of letting SQLAlchemy do it to be able to update
    # the orm_worker.
    _worker_set_updated_at_field(values)
    # With an ORM instance this is a plain int increment; without one,
    # models.Worker.race_preventer is a Column, so `+ 1` builds a SQL
    # expression that is evaluated server-side in the UPDATE.
    reference = orm_worker or models.Worker
    values['race_preventer'] = reference.race_preventer + 1
    result = query.update(values)
    if not result:
        raise exception.WorkerNotFound(id=id, **filters)
    # Keep the in-memory ORM object (if any) in sync with the DB row.
    _orm_worker_update(orm_worker, values)
    return result
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def worker_claim_for_cleanup(context, claimer_id, orm_worker):
    """Claim a worker entry for cleanup.

    Optimistic-locking claim: the UPDATE only matches when status,
    service_id, race_preventer, updated_at and id are still exactly what
    the caller last saw, so a concurrent claimer makes this return 0.

    :returns: number of rows updated (0 means somebody else claimed it).
    """
    # We set updated_at value so we are sure we update the DB entry even if the
    # service_id is the same in the DB, thus flagging the claim.
    values = {
        'service_id': claimer_id,
        'race_preventer': orm_worker.race_preventer + 1,
        'updated_at': timeutils.utcnow(),
    }
    _worker_set_updated_at_field(values)
    # We only update the worker entry if it hasn't been claimed by other host
    # or thread
    query = _worker_query(
        context,
        status=orm_worker.status,
        service_id=orm_worker.service_id,
        race_preventer=orm_worker.race_preventer,
        until=orm_worker.updated_at,
        id=orm_worker.id,
    )
    result = query.update(values, synchronize_session=False)
    if result:
        # Mirror the claimed values onto the in-memory ORM object too.
        _orm_worker_update(orm_worker, values)
    return result
@require_context
@main_context_manager.writer
def worker_destroy(context, **filters):
    """Delete a worker (no soft delete).

    Returns the number of rows removed.
    """
    return _worker_query(context, **filters).delete()
###############################
# TODO: (D Release) remove method and this comment
@enginefacade.writer
def remove_temporary_admin_metadata_data_migration(context, max_count):
    """Online data migration: soft-delete 'temporary' volume admin metadata.

    Processes at most *max_count* rows per call and returns a
    (total_remaining, updated_this_call) tuple.
    """
    table = models.VolumeAdminMetadata
    base_query = model_query(context, table.id).filter_by(key='temporary')
    total = base_query.count()
    ids_subquery = base_query.limit(max_count).subquery()
    # UPDATE cannot take a LIMIT directly, so restrict it with an IN over
    # the limited id subquery instead.
    updated = (
        model_query(context, table)
        .filter(table.id.in_(ids_subquery))
        .update(table.delete_values(), synchronize_session=False)
    )
    return total, updated
###############################
# Per-model (query factory, filter processor, single-object getter) triplets
# used to build paginated listings generically.
PAGINATION_HELPERS = {
    models.Volume: (_volume_get_query, _process_volume_filters, _volume_get),
    models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get),
    models.Backup: (_backups_get_query, _process_backups_filters, _backup_get),
    models.QualityOfServiceSpecs: (
        _qos_specs_get_query,
        _process_qos_specs_filters,
        _qos_specs_get,
    ),
    models.VolumeType: (
        _volume_type_get_query,
        _process_volume_types_filters,
        _volume_type_get_db_object,
    ),
    models.ConsistencyGroup: (
        _consistencygroups_get_query,
        _process_consistencygroups_filters,
        _consistencygroup_get,
    ),
    models.Message: (
        _messages_get_query,
        _process_messages_filters,
        _message_get,
    ),
    models.GroupType: (
        _group_type_get_query,
        _process_group_types_filters,
        _group_type_get_db_object,
    ),
    models.Group: (_groups_get_query, _process_groups_filters, _group_get),
    models.GroupSnapshot: (
        _group_snapshot_get_query,
        _process_group_snapshot_filters,
        _group_snapshot_get,
    ),
    models.VolumeAttachment: (
        _attachment_get_query,
        _process_attachment_filters,
        _attachment_get,
    ),
    models.Transfer: (
        _transfer_get_query,
        _process_transfer_filters,
        _transfer_get,
    ),
}
# (query factory, filter processor) pairs keyed by resource name, used to
# compute total counts for listings.
CALCULATE_COUNT_HELPERS = {
    'volume': (_volume_get_query, _process_volume_filters),
    'snapshot': (_snaps_get_query, _process_snaps_filters),
    'backup': (_backups_get_query, _process_backups_filters),
}
def get_projects(context, model, read_deleted="no"):
    """Return the distinct project_id values present for *model*."""
    query = model_query(context, model, read_deleted=read_deleted)
    return query.with_entities(
        sa.Column('project_id')).distinct().all()
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/db/sqlalchemy/models.py 0000664 0000000 0000000 00000120207 15131732575 0024222 0 ustar 00root root 0000000 0000000 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for cinder data.
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
import sqlalchemy as sa
# imports needed for cinderlib
from sqlalchemy import Column, String, Text # noqa: F401
from sqlalchemy import func
from sqlalchemy import schema
from sqlalchemy.sql import expression
from sqlalchemy.orm import backref, column_property, declarative_base, \
relationship, validates
CONF = cfg.CONF
BASE = declarative_base()
class CinderBase(models.TimestampMixin, models.ModelBase):
    """Base class for Cinder Models."""
    __table_args__ = {'mysql_engine': 'InnoDB'}
    # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage
    # of implementing of BP db-cleanup
    deleted_at = sa.Column(sa.DateTime)
    deleted = sa.Column(sa.Boolean, default=False)
    # NOTE(review): presumably clears the inherited `metadata` attribute so
    # subclasses can define their own metadata relationships — confirm.
    metadata = None
    @staticmethod
    def delete_values():
        # Values applied to a row on soft deletion; subclasses override this
        # to add model-specific fields (see e.g. VolumeAttachment).
        return {'deleted': True, 'deleted_at': timeutils.utcnow()}
    def delete(self, session):
        """Delete this object."""
        updated_values = self.delete_values()
        # updated_at is sent with the UPDATE but stripped from the returned
        # dict below.
        updated_values['updated_at'] = self.updated_at
        self.update(updated_values)
        self.save(session=session)
        del updated_values['updated_at']
        return updated_values
class Service(BASE, CinderBase):
    """Represents a running service on a host."""
    __tablename__ = 'services'
    __table_args__ = (
        sa.Index('services_uuid_idx', 'uuid', unique=True),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.Integer, primary_key=True)
    uuid = sa.Column(sa.String(36), nullable=True)
    cluster_name = sa.Column(sa.String(255), nullable=True)
    host = sa.Column(sa.String(255)) # , sa.ForeignKey('hosts.id'))
    binary = sa.Column(sa.String(255))
    # We want to overwrite default updated_at definition so we timestamp at
    # creation as well, so we only need to check updated_at for the heartbeat
    updated_at = sa.Column(
        sa.DateTime, default=timeutils.utcnow, onupdate=timeutils.utcnow
    )
    topic = sa.Column(sa.String(255))
    report_count = sa.Column(sa.Integer, nullable=False, default=0)
    disabled = sa.Column(sa.Boolean, default=False)
    availability_zone = sa.Column(sa.String(255), default='cinder')
    disabled_reason = sa.Column(sa.String(255))
    # adding column modified_at to contain timestamp
    # for manual enable/disable of cinder services
    # updated_at column will now contain timestamps for
    # periodic updates
    modified_at = sa.Column(sa.DateTime)
    # Version columns to support rolling upgrade. These report the max RPC API
    # and objects versions that the manager of the service is able to support.
    rpc_current_version = sa.Column(sa.String(36))
    object_current_version = sa.Column(sa.String(36))
    # replication_status can be: enabled, disabled, not-capable, error,
    # failed-over or not-configured
    replication_status = sa.Column(sa.String(36), default="not-capable")
    active_backend_id = sa.Column(sa.String(255))
    # TODO(stephenfin): Add nullable=False
    frozen = sa.Column(sa.Boolean, default=False)
    # The cluster this service belongs to; the join only matches non-deleted
    # services (see primaryjoin).
    cluster = relationship(
        'Cluster',
        backref='services',
        foreign_keys=cluster_name,
        primaryjoin='and_('
        'Service.cluster_name == Cluster.name,'
        'Service.deleted == False)',
    )
class Cluster(BASE, CinderBase):
    """Represents a cluster of hosts."""
    __tablename__ = 'clusters'
    # To remove potential races on creation we have a constraint set on name
    # and race_preventer fields, and we set value on creation to 0, so 2
    # clusters with the same name will fail this constraint. On deletion we
    # change this field to the same value as the id which will be unique and
    # will not conflict with the creation of another cluster with the same
    # name.
    __table_args__ = (
        sa.UniqueConstraint('name', 'binary', 'race_preventer'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.Integer, primary_key=True)
    # NOTE(geguileo): Name is constructed in the same way that Server.host but
    # using cluster configuration option instead of host.
    name = sa.Column(sa.String(255), nullable=False)
    binary = sa.Column(sa.String(255), nullable=False)
    disabled = sa.Column(sa.Boolean, default=False)
    disabled_reason = sa.Column(sa.String(255))
    race_preventer = sa.Column(sa.Integer, nullable=False, default=0)
    replication_status = sa.Column(sa.String(36), default="not-capable")
    active_backend_id = sa.Column(sa.String(255))
    frozen = sa.Column(
        sa.Boolean,
        nullable=False,
        default=False,
        server_default=expression.false(),
    )
    # Last heartbeat reported by any of the services of this cluster. This is
    # not deferred since we always want to load this field.
    last_heartbeat = column_property(
        sa.select(func.max(Service.updated_at))
        .where(sa.and_(Service.cluster_name == name, ~Service.deleted))
        .correlate_except(Service)
        .scalar_subquery(),
        deferred=False,
    )
    # Number of existing services for this cluster
    num_hosts = column_property(
        sa.select(func.count(Service.id))
        .where(sa.and_(Service.cluster_name == name, ~Service.deleted))
        .correlate_except(Service)
        .scalar_subquery(),
        group='services_summary',
        deferred=True,
    )
    # Number of services that are down for this cluster
    # NOTE: the 'expired' bound parameter must be supplied by the query that
    # loads this deferred property.
    num_down_hosts = column_property(
        sa.select(func.count(Service.id))
        .where(
            sa.and_(
                Service.cluster_name == name,
                ~Service.deleted,
                Service.updated_at < sa.bindparam('expired'),
            )
        )
        .correlate_except(Service)
        .scalar_subquery(),
        group='services_summary',
        deferred=True,
    )
    @staticmethod
    def delete_values():
        # On soft delete, race_preventer is rewritten to the row id so a new
        # cluster with the same name/binary can be created (see class note).
        return {
            'race_preventer': Cluster.id,
            'deleted': True,
            'deleted_at': timeutils.utcnow(),
        }
class ConsistencyGroup(BASE, CinderBase):
    """Represents a consistencygroup."""
    __tablename__ = 'consistencygroups'
    id = sa.Column(sa.String(36), primary_key=True)
    # TODO(stephenfin): Add nullable=False
    user_id = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(255))
    cluster_name = sa.Column(sa.String(255), nullable=True)
    host = sa.Column(sa.String(255))
    availability_zone = sa.Column(sa.String(255))
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    volume_type_id = sa.Column(sa.String(255))
    status = sa.Column(sa.String(255))
    # NOTE(review): presumably the cgsnapshot / source CG this group was
    # created from — confirm against the create-from-source flow.
    cgsnapshot_id = sa.Column(sa.String(36))
    source_cgid = sa.Column(sa.String(36))
class Group(BASE, CinderBase):
    """Represents a generic volume group."""
    __tablename__ = 'groups'
    __table_args__ = (
        # Speed up normal listings
        sa.Index('groups_deleted_project_id_idx', 'deleted', 'project_id'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.String(36), primary_key=True)
    # TODO(stephenfin): Add nullable=False
    user_id = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(255))
    cluster_name = sa.Column(sa.String(255))
    host = sa.Column(sa.String(255))
    availability_zone = sa.Column(sa.String(255))
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    status = sa.Column(sa.String(255))
    group_type_id = sa.Column(sa.String(36))
    # NOTE(review): presumably the group snapshot / source group this group
    # was created from — confirm against the create-from-source flow.
    group_snapshot_id = sa.Column(sa.String(36))
    source_group_id = sa.Column(sa.String(36))
    replication_status = sa.Column(sa.String(255))
class CGSnapshot(BASE, CinderBase):
    """Represents a cgsnapshot."""
    __tablename__ = 'cgsnapshots'
    id = sa.Column(sa.String(36), primary_key=True)
    consistencygroup_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('consistencygroups.id'),
        nullable=False,
        index=True,
    )
    # TODO(stephenfin): Add nullable=False
    user_id = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(255))
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    status = sa.Column(sa.String(255))
    # Parent CG; also exposes ConsistencyGroup.cgsnapshots via the backref.
    consistencygroup = relationship(
        ConsistencyGroup,
        backref="cgsnapshots",
        foreign_keys=consistencygroup_id,
        primaryjoin='CGSnapshot.consistencygroup_id == ConsistencyGroup.id',
    )
class GroupSnapshot(BASE, CinderBase):
    """Represents a group snapshot."""
    __tablename__ = 'group_snapshots'
    __table_args__ = (
        # Speed up normal listings
        sa.Index('group_snapshots_deleted_project_id_idx',
                 'deleted', 'project_id'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.String(36), primary_key=True)
    group_id = sa.Column(
        'group_id',
        sa.String(36),
        sa.ForeignKey('groups.id'),
        nullable=False,
        index=True,
    )
    user_id = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(255))
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    status = sa.Column(sa.String(255))
    group_type_id = sa.Column(sa.String(36))
    # Parent group; also exposes Group.group_snapshots via the backref.
    group = relationship(
        Group,
        backref="group_snapshots",
        foreign_keys=group_id,
        primaryjoin='GroupSnapshot.group_id == Group.id',
    )
class Volume(BASE, CinderBase):
    """Represents a block storage device that can be attached to a vm."""
    __tablename__ = 'volumes'
    __table_args__ = (
        sa.Index('volumes_service_uuid_idx', 'service_uuid', 'deleted'),
        # Speed up normal listings
        sa.Index('volumes_deleted_project_id_idx', 'deleted', 'project_id'),
        # Speed up service start, create volume from image when using direct
        # urls, host REST API, and the cinder-manage update host cmd
        sa.Index('volumes_deleted_host_idx', 'deleted', 'host'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.String(36), primary_key=True)
    _name_id = sa.Column(sa.String(36)) # Don't access/modify this directly!
    # NOTE(review): the doc string below reads inverted for a field named
    # use_quota that defaults to True — confirm intended wording.
    use_quota = Column(
        sa.Boolean,
        nullable=False,
        default=True,
        server_default=sa.true(),
        doc='Ignore volume in quota usage',
    )
    @property
    def name_id(self):
        """Return _name_id when set, otherwise the volume's own id."""
        return self.id if not self._name_id else self._name_id
    @name_id.setter
    def name_id(self, value):
        self._name_id = value
    @property
    def name(self):
        """Backend volume name built from the volume_name_template option."""
        return CONF.volume_name_template % self.name_id
    ec2_id = sa.Column(sa.String(255))
    user_id = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(255))
    snapshot_id = sa.Column(sa.String(36))
    cluster_name = sa.Column(sa.String(255), nullable=True)
    host = sa.Column(sa.String(255)) # , sa.ForeignKey('hosts.id'))
    size = sa.Column(sa.Integer)
    availability_zone = sa.Column(sa.String(255)) # TODO(vish): foreign key?
    status = sa.Column(sa.String(255)) # TODO(vish): enum?
    attach_status = sa.Column(sa.String(255)) # TODO(vish): enum
    migration_status = sa.Column(sa.String(255))
    scheduled_at = sa.Column(sa.DateTime)
    launched_at = sa.Column(sa.DateTime)
    terminated_at = sa.Column(sa.DateTime)
    display_name = sa.Column(sa.String(255))
    display_description = sa.Column(sa.String(255))
    provider_location = sa.Column(sa.String(256))
    provider_auth = sa.Column(sa.String(256))
    provider_geometry = sa.Column(sa.String(255))
    provider_id = sa.Column(sa.String(255))
    volume_type_id = sa.Column(sa.String(36), nullable=False)
    source_volid = sa.Column(sa.String(36))
    encryption_key_id = sa.Column(sa.String(36))
    consistencygroup_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('consistencygroups.id'),
        index=True,
    )
    group_id = sa.Column(
        'group_id',
        sa.String(36),
        sa.ForeignKey('groups.id'),
        index=True,
    )
    bootable = sa.Column(sa.Boolean, default=False)
    multiattach = sa.Column(sa.Boolean, default=False)
    replication_status = sa.Column(sa.String(255))
    replication_extended_status = sa.Column(sa.String(255))
    replication_driver_data = sa.Column(sa.String(255))
    previous_status = sa.Column(sa.String(255))
    consistencygroup = relationship(
        ConsistencyGroup,
        backref="volumes",
        foreign_keys=consistencygroup_id,
        primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id',
    )
    group = relationship(
        Group,
        backref="volumes",
        foreign_keys=group_id,
        primaryjoin='Volume.group_id == Group.id',
    )
    service_uuid = sa.Column(
        sa.String(36),
        sa.ForeignKey('services.uuid'),
        nullable=True,
    )
    service = relationship(
        Service,
        backref="volumes",
        foreign_keys=service_uuid,
        primaryjoin='Volume.service_uuid == Service.uuid',
    )
    # True => Do locking when iSCSI initiator doesn't support manual scan
    # False => Never do locking
    # None => Forced locking regardless of the iSCSI initiator
    # make an FK of service?
    shared_targets = sa.Column(sa.Boolean, nullable=True, default=True)
class VolumeMetadata(BASE, CinderBase):
    """Represents a metadata key/value pair for a volume."""
    __tablename__ = 'volume_metadata'
    id = sa.Column(sa.Integer, primary_key=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    volume_id = sa.Column(
        sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True
    )
    # Owning volume; the join only matches non-deleted metadata rows.
    volume = relationship(
        Volume,
        backref="volume_metadata",
        foreign_keys=volume_id,
        primaryjoin='and_('
        'VolumeMetadata.volume_id == Volume.id,'
        'VolumeMetadata.deleted == False)',
    )
class VolumeAdminMetadata(BASE, CinderBase):
    """Represents an administrator metadata key/value pair for a volume."""
    __tablename__ = 'volume_admin_metadata'
    id = sa.Column(sa.Integer, primary_key=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    volume_id = sa.Column(
        sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True
    )
    # Owning volume; the join only matches non-deleted admin metadata rows.
    volume = relationship(
        Volume,
        backref="volume_admin_metadata",
        foreign_keys=volume_id,
        primaryjoin='and_('
        'VolumeAdminMetadata.volume_id == Volume.id,'
        'VolumeAdminMetadata.deleted == False)',
    )
class VolumeAttachment(BASE, CinderBase):
    """Represents a volume attachment for a vm."""
    __tablename__ = 'volume_attachment'
    id = sa.Column(sa.String(36), primary_key=True)
    volume_id = sa.Column(
        sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True
    )
    volume = relationship(
        Volume,
        backref="volume_attachment",
        foreign_keys=volume_id,
        primaryjoin='and_('
        'VolumeAttachment.volume_id == Volume.id,'
        'VolumeAttachment.deleted == False)',
    )
    instance_uuid = sa.Column(sa.String(36))
    attached_host = sa.Column(sa.String(255))
    mountpoint = sa.Column(sa.String(255))
    attach_time = sa.Column(sa.DateTime)
    detach_time = sa.Column(sa.DateTime)
    attach_status = sa.Column(sa.String(255))
    attach_mode = sa.Column(sa.String(36))
    connection_info = sa.Column(sa.Text)
    # Stores a serialized json dict of host connector information from brick.
    connector = sa.Column(sa.Text)
    @staticmethod
    def delete_values():
        # Soft delete also marks the attachment as detached and records the
        # detach time, using one timestamp for both fields.
        now = timeutils.utcnow()
        return {'deleted': True,
                'deleted_at': now,
                'attach_status': 'detached',
                'detach_time': now}
class VolumeType(BASE, CinderBase):
    """Represent possible volume_types of volumes offered."""
    __tablename__ = "volume_types"
    id = sa.Column(sa.String(36), primary_key=True)
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    # A reference to qos_specs entity
    qos_specs_id = sa.Column(
        sa.String(36), sa.ForeignKey('quality_of_service_specs.id'), index=True
    )
    is_public = sa.Column(sa.Boolean, default=True)
    # Volumes of this type; the join only matches non-deleted volume types,
    # and each Volume gets a scalar `volume_type` backref.
    volumes = relationship(
        Volume,
        backref=backref('volume_type', uselist=False),
        foreign_keys=id,
        primaryjoin='and_('
        'Volume.volume_type_id == VolumeType.id, '
        'VolumeType.deleted == False)',
    )
class GroupType(BASE, CinderBase):
    """Represent possible group_types of groups offered."""
    __tablename__ = "group_types"
    id = sa.Column(sa.String(36), primary_key=True)
    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.String(255))
    is_public = sa.Column(sa.Boolean, default=True)
    # Groups of this type; each Group gets a scalar `group_type` backref.
    groups = relationship(
        Group,
        backref=backref('group_type', uselist=False),
        foreign_keys=id,
        primaryjoin='and_('
        'Group.group_type_id == GroupType.id, '
        'GroupType.deleted == False)',
    )
class GroupVolumeTypeMapping(BASE, CinderBase):
    """Represent mapping between groups and volume_types."""
    __tablename__ = "group_volume_type_mapping"
    id = sa.Column(sa.Integer, primary_key=True, nullable=False)
    volume_type_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('volume_types.id'),
        nullable=False,
        index=True,
    )
    group_id = sa.Column(
        sa.String(36), sa.ForeignKey('groups.id'), nullable=False, index=True
    )
    # Owning group; exposes the mappings as Group.volume_types.
    group = relationship(
        Group,
        backref="volume_types",
        foreign_keys=group_id,
        primaryjoin='and_('
        'GroupVolumeTypeMapping.group_id == Group.id,'
        'GroupVolumeTypeMapping.deleted == False)',
    )
class VolumeTypeProjects(BASE, CinderBase):
    """Represent projects associated volume_types."""
    __tablename__ = "volume_type_projects"
    __table_args__ = (
        schema.UniqueConstraint(
            "volume_type_id",
            "project_id",
            "deleted",
        ),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.Integer, primary_key=True)
    # TODO(stephenfin): Add nullable=False
    volume_type_id = sa.Column(
        sa.String(36), sa.ForeignKey('volume_types.id'),
    )
    project_id = sa.Column(sa.String(255))
    # Overrides CinderBase's Boolean `deleted` with an Integer.
    # NOTE(review): presumably so soft-deleted rows can carry distinct values
    # and not collide on the unique constraint above — confirm.
    deleted = sa.Column(sa.Integer, default=0)
    volume_type = relationship(
        VolumeType,
        backref="projects",
        foreign_keys=volume_type_id,
        primaryjoin='and_('
        'VolumeTypeProjects.volume_type_id == VolumeType.id,'
        'VolumeTypeProjects.deleted == 0)',
    )
class GroupTypeProjects(BASE, CinderBase):
    """Represent projects associated group_types."""
    __tablename__ = "group_type_projects"
    __table_args__ = (
        sa.UniqueConstraint('group_type_id', 'project_id', 'deleted'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.Integer, primary_key=True)
    # TODO(stephenfin): Add nullable=False
    group_type_id = sa.Column(
        sa.String(36), sa.ForeignKey('group_types.id'),
    )
    project_id = sa.Column(sa.String(255))
    # Owning group type; exposes these rows as GroupType.projects.
    group_type = relationship(
        GroupType,
        backref="projects",
        foreign_keys=group_type_id,
        primaryjoin='and_('
        'GroupTypeProjects.group_type_id == GroupType.id,'
        'GroupTypeProjects.deleted == False)',
    )
class VolumeTypeExtraSpecs(BASE, CinderBase):
    """Represents additional specs as key/value pairs for a volume_type."""
    __tablename__ = 'volume_type_extra_specs'
    id = sa.Column(sa.Integer, primary_key=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    volume_type_id = sa.Column(
        sa.String(36),
        sa.ForeignKey(
            'volume_types.id',
            # Explicitly named FK constraint so migrations can reference it.
            name='volume_type_extra_specs_ibfk_1',
        ),
        nullable=False,
        index=True,
    )
    volume_type = relationship(
        VolumeType,
        backref="extra_specs",
        foreign_keys=volume_type_id,
        primaryjoin='and_('
        'VolumeTypeExtraSpecs.volume_type_id == VolumeType.id,'
        'VolumeTypeExtraSpecs.deleted == False)',
    )
class GroupTypeSpecs(BASE, CinderBase):
    """Represents additional specs as key/value pairs for a group_type."""
    __tablename__ = 'group_type_specs'
    id = sa.Column(sa.Integer, primary_key=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    group_type_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('group_types.id'),
        nullable=False,
        index=True,
    )
    # Owning group type; exposes these rows as GroupType.group_specs.
    group_type = relationship(
        GroupType,
        backref="group_specs",
        foreign_keys=group_type_id,
        primaryjoin='and_('
        'GroupTypeSpecs.group_type_id == GroupType.id,'
        'GroupTypeSpecs.deleted == False)',
    )
class DefaultVolumeTypes(BASE, CinderBase):
    """Represent projects associated volume_types."""
    __tablename__ = "default_volume_types"
    volume_type_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('volume_types.id'),
        index=True,
    )
    # project_id is the primary key: at most one default type per project.
    project_id = sa.Column(sa.String(255), primary_key=True)
    volume_type = relationship(
        VolumeType,
        foreign_keys=volume_type_id,
        primaryjoin='DefaultVolumeTypes.volume_type_id == VolumeType.id',
    )
class QualityOfServiceSpecs(BASE, CinderBase):
    """Represents QoS specs as key/value pairs.
    QoS specs is standalone entity that can be associated/disassociated
    with volume types (one to many relation). Adjacency list relationship
    pattern is used in this model in order to represent following hierarchical
    data with in flat table, e.g, following structure:
    .. code-block:: none
      qos-specs-1  'Rate-Limit'
           |
           +------>  consumer = 'front-end'
           +------>  total_bytes_sec = 1048576
           +------>  total_iops_sec = 500
      qos-specs-2  'QoS_Level1'
           |
           +------>  consumer = 'back-end'
           +------>  max-iops =  1000
           +------>  min-iops = 200
    is represented by:
      id       specs_id       key                  value
      ------   --------   -------------            -----
      UUID-1   NULL       QoSSpec_Name           Rate-Limit
      UUID-2   UUID-1       consumer             front-end
      UUID-3   UUID-1     total_bytes_sec        1048576
      UUID-4   UUID-1     total_iops_sec           500
      UUID-5   NULL       QoSSpec_Name           QoS_Level1
      UUID-6   UUID-5       consumer             back-end
      UUID-7   UUID-5       max-iops               1000
      UUID-8   UUID-5       min-iops               200
    """
    __tablename__ = 'quality_of_service_specs'
    id = sa.Column(sa.String(36), primary_key=True)
    # Self-referencing FK implementing the adjacency list described above:
    # NULL for root (spec name) rows, parent id for key/value child rows.
    specs_id = sa.Column(sa.String(36), sa.ForeignKey(id), index=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    specs = relationship(
        "QualityOfServiceSpecs",
        cascade="all, delete-orphan",
        backref=backref("qos_spec", remote_side=id),
    )
    # Volume types using this spec; matches either the root row id or a
    # child row's specs_id, excluding soft-deleted specs.
    vol_types = relationship(
        VolumeType,
        backref=backref('qos_specs'),
        foreign_keys=id,
        primaryjoin='and_('
        'or_(VolumeType.qos_specs_id == '
        'QualityOfServiceSpecs.id,'
        'VolumeType.qos_specs_id == '
        'QualityOfServiceSpecs.specs_id),'
        'QualityOfServiceSpecs.deleted == False)',
    )
class VolumeGlanceMetadata(BASE, CinderBase):
    """Glance metadata for a bootable volume."""
    __tablename__ = 'volume_glance_metadata'
    id = sa.Column(sa.Integer, primary_key=True, nullable=False)
    # Both FKs are nullable: a row may reference a volume or a snapshot.
    volume_id = sa.Column(
        sa.String(36), sa.ForeignKey('volumes.id'), index=True
    )
    snapshot_id = sa.Column(
        sa.String(36), sa.ForeignKey('snapshots.id'), index=True
    )
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.Text)
    volume = relationship(
        Volume,
        backref="volume_glance_metadata",
        foreign_keys=volume_id,
        primaryjoin='and_('
        'VolumeGlanceMetadata.volume_id == Volume.id,'
        'VolumeGlanceMetadata.deleted == False)',
    )
class Quota(BASE, CinderBase):
    """Represents a single quota override for a project.
    If there is no row for a given project id and resource, then the
    default for the quota class is used. If there is no row for a
    given quota class and resource, then the default for the
    deployment is used. If the row is present but the hard limit is
    Null, then the resource is unlimited.
    """
    __tablename__ = 'quotas'
    id = sa.Column(sa.Integer, primary_key=True)
    # TODO(stephenfin): Add index=True
    project_id = sa.Column(sa.String(255))
    resource = sa.Column(sa.String(300), nullable=False)
    # NULL means unlimited (see class docstring).
    hard_limit = sa.Column(sa.Integer, nullable=True)
class QuotaClass(BASE, CinderBase):
    """Represents a single quota override for a quota class.
    If there is no row for a given quota class and resource, then the
    default for the deployment is used. If the row is present but the
    hard limit is Null, then the resource is unlimited.
    """
    __tablename__ = 'quota_classes'
    id = sa.Column(sa.Integer, primary_key=True)
    class_name = sa.Column(sa.String(255), index=True)
    resource = sa.Column(sa.String(300))
    # NULL means unlimited (see class docstring).
    hard_limit = sa.Column(sa.Integer, nullable=True)
class QuotaUsage(BASE, CinderBase):
    """Represents the current usage for a given resource."""
    __tablename__ = 'quota_usages'
    # NOTE: project_id and resource are not enough as unique constraint since
    # we do soft deletes and there could be duplicated entries, so we add the
    # race_preventer field.
    __table_args__ = (
        sa.Index('quota_usage_project_resource_idx', 'project_id', 'resource'),
        sa.UniqueConstraint('project_id', 'resource', 'race_preventer'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.Integer, primary_key=True)
    project_id = sa.Column(sa.String(255), index=True)
    # TODO(stephenfin): Add index=True
    resource = sa.Column(sa.String(300))
    in_use = sa.Column(sa.Integer, nullable=False)
    reserved = sa.Column(sa.Integer, nullable=False)
    @property
    def total(self):
        """Total consumption: committed usage plus outstanding reservations."""
        return self.in_use + self.reserved
    until_refresh = sa.Column(sa.Integer, nullable=True)
    # To prevent races during creation on quota_reserve method
    race_preventer = sa.Column(sa.Boolean, nullable=True, default=True)
    @staticmethod
    def delete_values():
        # Null out race_preventer on soft delete so the unique constraint
        # doesn't block re-creation of the same (project, resource) row.
        res = CinderBase.delete_values()
        res['race_preventer'] = None
        return res
class Reservation(BASE, CinderBase):
    """Represents a resource reservation for quotas."""
    __tablename__ = 'reservations'
    __table_args__ = (
        sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
        sa.Index('reservations_deleted_uuid_idx', 'deleted', 'uuid'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.Integer, primary_key=True)
    uuid = sa.Column(sa.String(36), nullable=False)
    usage_id = sa.Column(
        sa.Integer, sa.ForeignKey('quota_usages.id'), nullable=True, index=True
    )
    project_id = sa.Column(sa.String(255), index=True)
    resource = sa.Column(sa.String(300))
    delta = sa.Column(sa.Integer, nullable=False)
    # TODO(stephenfin): Add nullable=False
    expire = sa.Column(sa.DateTime)
    # The quota usage row this reservation counts against; the join excludes
    # soft-deleted usages.
    usage = relationship(
        "QuotaUsage",
        foreign_keys=usage_id,
        primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
        'QuotaUsage.deleted == False)',
    )
class Snapshot(BASE, CinderBase):
    """Represents a snapshot of volume."""
    __tablename__ = 'snapshots'
    __table_args__ = (
        # Speed up normal listings
        sa.Index('snapshots_deleted_project_id_idx', 'deleted', 'project_id'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.String(36), primary_key=True)
    # NOTE(review): the doc string below says "volume" but this is the
    # snapshot model — looks copy-pasted from Volume.use_quota; confirm.
    use_quota = Column(
        sa.Boolean,
        nullable=False,
        default=True,
        server_default=sa.true(),
        doc='Ignore volume in quota usage',
    )
    @property
    def name(self):
        """Backend snapshot name built from snapshot_name_template."""
        return CONF.snapshot_name_template % self.id
    @property
    def volume_name(self):
        """Backend name of the parent volume."""
        return self.volume.name # pylint: disable=E1101
    user_id = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(255))
    volume_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('volumes.id', name='snapshots_volume_id_fkey'),
        nullable=False,
        index=True,
    )
    cgsnapshot_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('cgsnapshots.id'),
        index=True,
    )
    group_snapshot_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('group_snapshots.id'),
        index=True,
    )
    status = sa.Column(sa.String(255))
    progress = sa.Column(sa.String(255))
    volume_size = sa.Column(sa.Integer)
    scheduled_at = sa.Column(sa.DateTime)
    display_name = sa.Column(sa.String(255))
    display_description = sa.Column(sa.String(255))
    encryption_key_id = sa.Column(sa.String(36))
    volume_type_id = sa.Column(sa.String(36), nullable=False)
    provider_location = sa.Column(sa.String(255))
    provider_id = sa.Column(sa.String(255))
    provider_auth = sa.Column(sa.String(255))
    volume = relationship(
        Volume,
        backref="snapshots",
        foreign_keys=volume_id,
        primaryjoin='Snapshot.volume_id == Volume.id',
    )
    cgsnapshot = relationship(
        CGSnapshot,
        backref="snapshots",
        foreign_keys=cgsnapshot_id,
        primaryjoin='Snapshot.cgsnapshot_id == CGSnapshot.id',
    )
    group_snapshot = relationship(
        GroupSnapshot,
        backref="snapshots",
        foreign_keys=group_snapshot_id,
        primaryjoin='Snapshot.group_snapshot_id == GroupSnapshot.id',
    )
class SnapshotMetadata(BASE, CinderBase):
    """Represents a metadata key/value pair for a snapshot."""
    __tablename__ = 'snapshot_metadata'
    # Surrogate integer key; no uniqueness constraint on (snapshot_id, key)
    # is declared at this layer.
    id = sa.Column(sa.Integer, primary_key=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    snapshot_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('snapshots.id'),
        nullable=False,
        index=True,
    )
    # Join condition excludes soft-deleted metadata rows.
    snapshot = relationship(
        Snapshot,
        backref="snapshot_metadata",
        foreign_keys=snapshot_id,
        primaryjoin='and_('
                    'SnapshotMetadata.snapshot_id == Snapshot.id,'
                    'SnapshotMetadata.deleted == False)',
    )
class Backup(BASE, CinderBase):
    """Represents a backup of a volume to Swift."""
    __tablename__ = 'backups'
    __table_args__ = (
        # Speed up normal listings
        sa.Index('backups_deleted_project_id_idx', 'deleted', 'project_id'),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.String(36), primary_key=True)
    # Backups don't have use_quota field since we don't have temporary backups
    @property
    def name(self):
        # Display name derived from the configured template and the id.
        return CONF.backup_name_template % self.id
    # TODO(stephenfin): Add nullable=False
    user_id = sa.Column(sa.String(255))
    project_id = sa.Column(sa.String(255))
    # Plain string reference; no FK so backups outlive their volume.
    # TODO confirm: that appears to be the intent — verify against callers.
    volume_id = sa.Column(sa.String(36), nullable=False)
    host = sa.Column(sa.String(255))
    availability_zone = sa.Column(sa.String(255))
    display_name = sa.Column(sa.String(255))
    display_description = sa.Column(sa.String(255))
    container = sa.Column(sa.String(255))
    parent_id = sa.Column(sa.String(36))
    status = sa.Column(sa.String(255))
    fail_reason = sa.Column(sa.String(255))
    service_metadata = sa.Column(sa.String(255))
    service = sa.Column(sa.String(255))
    size = sa.Column(sa.Integer)
    object_count = sa.Column(sa.Integer)
    temp_volume_id = sa.Column(sa.String(36))
    temp_snapshot_id = sa.Column(sa.String(36))
    num_dependent_backups = sa.Column(sa.Integer)
    snapshot_id = sa.Column(sa.String(36))
    data_timestamp = sa.Column(sa.DateTime)
    restore_volume_id = sa.Column(sa.String(36))
    encryption_key_id = sa.Column(sa.String(36))
    @validates('fail_reason')
    def validate_fail_reason(self, key, fail_reason):
        """Truncate fail_reason to fit the String(255) column.

        None/empty values are normalized to '' so the column never stores
        None.  Replaces the error-prone ``cond and a or b`` idiom with an
        equivalent conditional expression.
        """
        return fail_reason[:255] if fail_reason else ''
class BackupMetadata(BASE, CinderBase):
    """Represents a metadata key/value pair for a backup."""
    __tablename__ = 'backup_metadata'
    # Surrogate integer key for the k/v row.
    id = sa.Column(sa.Integer, primary_key=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    backup_id = sa.Column(
        sa.String(36), sa.ForeignKey('backups.id'), nullable=False, index=True
    )
    # Join condition excludes soft-deleted metadata rows.
    backup = relationship(
        Backup,
        backref="backup_metadata",
        foreign_keys=backup_id,
        primaryjoin='and_('
                    'BackupMetadata.backup_id == Backup.id,'
                    'BackupMetadata.deleted == False)',
    )
class Encryption(BASE, CinderBase):
    """Represents encryption requirement for a volume type.
    Encryption here is a set of performance characteristics describing
    cipher, provider, and key_size for a certain volume type.
    """
    __tablename__ = 'encryption'
    # NOTE (smcginnis): nullable=True triggers this to not set a default
    # value, but since it's a primary key the resulting schema will end up
    # still being NOT NULL. This is avoiding a case in MySQL where it will
    # otherwise set this to NOT NULL DEFAULT ''. May be harmless, but
    # inconsistent with previous schema.
    encryption_id = sa.Column(
        sa.String(36),
        primary_key=True,
        nullable=True,
    )
    # Cipher/provider/key_size/control_location describe the encryption
    # scheme applied to volumes of the associated type.
    cipher = sa.Column(sa.String(255))
    key_size = sa.Column(sa.Integer)
    provider = sa.Column(sa.String(255))
    control_location = sa.Column(sa.String(255))
    # NOTE(joel-coffman): The volume_type_id must be unique or else the
    # referenced volume type becomes ambiguous. That is, specifying the
    # volume type is not sufficient to identify a particular encryption
    # scheme unless each volume type is associated with at most one
    # encryption scheme.
    # TODO(stephenfin): Make this a foreign key
    volume_type_id = sa.Column(sa.String(36), nullable=False)
    # Join condition excludes soft-deleted encryption rows.
    volume_type = relationship(
        VolumeType,
        backref="encryption",
        foreign_keys=volume_type_id,
        primaryjoin='and_('
                    'Encryption.volume_type_id == VolumeType.id,'
                    'Encryption.deleted == False)',
    )
class Transfer(BASE, CinderBase):
    """Represents a volume transfer request."""
    __tablename__ = 'transfers'
    id = sa.Column(sa.String(36), primary_key=True)
    volume_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('volumes.id'),
        nullable=False,
        index=True,
    )
    display_name = sa.Column(sa.String(255))
    # salt + crypt_hash store the (hashed) auth key the recipient must
    # present to accept the transfer.
    salt = sa.Column(sa.String(255))
    crypt_hash = sa.Column(sa.String(255))
    # After this time the transfer offer is no longer valid.
    expires_at = sa.Column(sa.DateTime)
    # When True, the volume's snapshots are not transferred with it.
    no_snapshots = sa.Column(sa.Boolean, default=False)
    source_project_id = sa.Column(sa.String(255), nullable=True)
    destination_project_id = sa.Column(sa.String(255), nullable=True)
    accepted = sa.Column(sa.Boolean, default=False)
    # Join condition excludes soft-deleted volumes.
    volume = relationship(
        Volume,
        backref="transfer",
        foreign_keys=volume_id,
        primaryjoin='and_('
                    'Transfer.volume_id == Volume.id,'
                    'Transfer.deleted == False)',
    )
class DriverInitiatorData(BASE, models.TimestampMixin, models.ModelBase):
    """Represents private key-value pair specific an initiator for drivers"""
    # NOTE: inherits only TimestampMixin/ModelBase, not CinderBase, so these
    # rows have no soft-delete columns.
    __tablename__ = 'driver_initiator_data'
    __table_args__ = (
        # One value per (initiator, namespace, key) triple.
        schema.UniqueConstraint("initiator", "namespace", "key"),
        CinderBase.__table_args__,
    )
    id = sa.Column(sa.Integer, primary_key=True, nullable=False)
    initiator = sa.Column(sa.String(255), index=True, nullable=False)
    namespace = sa.Column(sa.String(255), nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    value = sa.Column(sa.String(255))
class Message(BASE, CinderBase):
    """Represents a message"""
    __tablename__ = 'messages'
    id = sa.Column(sa.String(36), primary_key=True, nullable=False)
    project_id = sa.Column(sa.String(255), nullable=False)
    # Info/Error/Warning.
    message_level = sa.Column(sa.String(255), nullable=False)
    # Request that generated the message, when known.
    request_id = sa.Column(sa.String(255), nullable=True)
    resource_type = sa.Column(sa.String(36))
    # The UUID of the related resource.
    resource_uuid = sa.Column(sa.String(255), nullable=True)
    # Operation specific event ID.
    event_id = sa.Column(sa.String(255), nullable=False)
    # Message detail ID.
    detail_id = sa.Column(sa.String(10), nullable=True)
    # Operation specific action.
    action_id = sa.Column(sa.String(10), nullable=True)
    # After this time the message may no longer exist
    expires_at = sa.Column(sa.DateTime, nullable=True, index=True)
class ImageVolumeCacheEntry(BASE, models.ModelBase):
    """Represents an image volume cache entry"""
    # NOTE: inherits only ModelBase — no soft-delete or timestamp columns.
    __tablename__ = 'image_volume_cache_entries'
    id = sa.Column(sa.Integer, primary_key=True, nullable=False)
    host = sa.Column(sa.String(255), index=True, nullable=False)
    cluster_name = sa.Column(sa.String(255), nullable=True)
    image_id = sa.Column(sa.String(36), index=True, nullable=False)
    # TODO(stephenfin): Add nullable=False
    image_updated_at = sa.Column(sa.DateTime)
    volume_id = sa.Column(sa.String(36), nullable=False)
    size = sa.Column(sa.Integer, nullable=False)
    # NOTE(review): the zero-arg lambda (rather than timeutils.utcnow
    # directly, as Worker uses) looks deliberate, keeping SQLAlchemy from
    # passing its execution context into utcnow — confirm before simplifying.
    last_used = sa.Column(
        sa.DateTime, nullable=False, default=lambda: timeutils.utcnow(),
    )
class Worker(BASE, CinderBase):
    """Represents all resources that are being worked on by a node."""
    __tablename__ = 'workers'
    __table_args__ = (
        # Only one worker row may claim a given resource at a time.
        schema.UniqueConstraint('resource_type', 'resource_id'),
        CinderBase.__table_args__,
    )
    # We want to overwrite default updated_at definition so we timestamp at
    # creation as well
    updated_at = sa.Column(
        sa.DateTime, default=timeutils.utcnow, onupdate=timeutils.utcnow
    )
    # Id added for convenience and speed on some operations
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    # Type of the resource we are working on (Volume, Snapshot, Backup) it must
    # match the Versioned Object class name.
    resource_type = sa.Column(sa.String(40), nullable=False)
    # UUID of the resource we are working on
    resource_id = sa.Column(sa.String(36), nullable=False)
    # Status that should be cleaned on service failure
    status = sa.Column(sa.String(255), nullable=False)
    # Service that is currently processing the operation
    service_id = sa.Column(
        sa.Integer,
        sa.ForeignKey('services.id'),
        nullable=True,
        index=True,
    )
    # To prevent claiming and updating races
    race_preventer = sa.Column(
        sa.Integer,
        nullable=False,
        default=0,
        server_default=sa.text('0'),
    )
    # This is a flag we don't need to store in the DB as it is only used when
    # we are doing the cleanup to let decorators know
    cleaning = False
    service = relationship(
        'Service',
        backref="workers",
        foreign_keys=service_id,
        primaryjoin='Worker.service_id == Service.id',
    )
class AttachmentSpecs(BASE, CinderBase):
    """Represents attachment specs as k/v pairs for a volume_attachment.
    DO NOT USE - NOTHING SHOULD WRITE NEW DATA TO THIS TABLE
    The volume_attachment.connector column should be used instead.
    """
    __tablename__ = 'attachment_specs'
    # Surrogate integer key for the k/v row.
    id = sa.Column(sa.Integer, primary_key=True)
    key = sa.Column(sa.String(255))
    value = sa.Column(sa.String(255))
    attachment_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('volume_attachment.id'),
        nullable=False,
        index=True,
    )
    # Join condition excludes soft-deleted spec rows.
    volume_attachment = relationship(
        VolumeAttachment,
        backref="attachment_specs",
        foreign_keys=attachment_id,
        primaryjoin='and_('
                    'AttachmentSpecs.attachment_id == VolumeAttachment.id,'
                    'AttachmentSpecs.deleted == False)',
    )
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/exception.py 0000664 0000000 0000000 00000077124 15131732575 0022217 0 ustar 00root root 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder base exception handling.
Includes decorator for re-raising Cinder-type exceptions.
SHOULD include dedicated exception logging.
"""
from typing import Optional, Union
from oslo_log import log as logging
from oslo_versionedobjects import exception as obj_exc
import webob.exc
from webob.util import status_generic_reasons
from webob.util import status_reasons
from cinder.i18n import _
LOG = logging.getLogger(__name__)
class ConvertedException(webob.exc.WSGIHTTPException):
    """Adapt a (code, title, explanation) triple to a WSGI HTTP exception."""
    def __init__(self, code: int = 500, title: str = "",
                 explanation: str = ""):
        self.code = code
        # There is a strict rule about constructing status line for HTTP:
        # '...Status-Line, consisting of the protocol version followed by a
        # numeric status code and its associated textual phrase, with each
        # element separated by SP characters'
        # (http://www.faqs.org/rfcs/rfc2616.html)
        # 'code' and 'title' can not be empty because they correspond
        # to numeric status code and its associated text, so derive a
        # phrase from the code when the caller did not supply one.
        if not title:
            try:
                title = status_reasons[self.code]
            except KeyError:
                # Fall back to the generic phrase for the status class
                # (e.g. 5xx -> "Server Error").
                title = status_generic_reasons[self.code // 100]
        self.title = title
        self.explanation = explanation
        super().__init__()
class CinderException(Exception):
    """Base Cinder Exception
    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    # Default HTTP-style status code; subclasses override (e.g. 404, 413).
    code = 500
    # Extra response headers; subclasses may override (e.g. Retry-After).
    headers: dict = {}
    # Whether the message is safe to expose; subclasses may override.
    safe = False
    def __init__(self, message: Optional[Union[str, tuple]] = None, **kwargs):
        self.kwargs = kwargs
        # Stash the raw message so _should_format() can inspect it.
        self.kwargs['message'] = message
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        for k, v in self.kwargs.items():
            if isinstance(v, Exception):
                # NOTE(tommylikehu): If this is a cinder exception it will
                # return the msg object, so we won't be preventing
                # translations.
                self.kwargs[k] = str(v)
        if self._should_format():
            try:
                # printf the class template with the provided kwargs.
                message = self.message % kwargs
            except Exception:
                # NOTE(melwitt): This is done in a separate method so it can be
                # monkey-patched during testing to make it a hard failure.
                self._log_exception()
                message = self.message
        elif isinstance(message, Exception):
            # NOTE(tommylikehu): If this is a cinder exception it will
            # return the msg object, so we won't be preventing
            # translations.
            message = str(message)
        # NOTE(luisg): We put the actual message in 'msg' so that we can access
        # it, because if we try to access the message via 'message' it will be
        # overshadowed by the class' message attribute
        self.msg = message
        super(CinderException, self).__init__(message)
        # Oslo.messaging use the argument 'message' to rebuild exception
        # directly at the rpc client side, therefore we should not use it
        # in our keyword arguments, otherwise, the rebuild process will fail
        # with duplicate keyword exception.
        self.kwargs.pop('message', None)
    def _log_exception(self) -> None:
        # kwargs doesn't match a variable in the message
        # log the issue and the kwargs
        LOG.exception('Exception in string format operation:')
        for name, value in self.kwargs.items():
            LOG.error("%(name)s: %(value)s",
                      {'name': name, 'value': value})
    def _should_format(self) -> bool:
        # Format the class template when no explicit message was passed, or
        # when the template itself consumes the 'message' kwarg.
        return self.kwargs['message'] is None or '%(message)' in self.message
# Driver/backend and authorization exception families.  Each subclass only
# overrides the printf-style 'message' template (and optionally 'code');
# substitution happens in CinderException.__init__.
class VolumeBackendAPIException(CinderException):
    message = _("Bad or unexpected response from the storage volume "
                "backend API: %(data)s")
class VolumeDriverException(CinderException):
    # '%(message)s' is filled from the 'message' kwarg; see
    # CinderException._should_format.
    message = _("Volume driver reported an error: %(message)s")
class BackupDriverException(CinderException):
    message = _("Backup driver reported an error: %(reason)s")
class BackupRestoreCancel(CinderException):
    message = _("Canceled backup %(back_id)s restore on volume %(vol_id)s")
class GlanceConnectionFailed(CinderException):
    message = _("Connection to glance failed: %(reason)s")
class ProgrammingError(CinderException):
    message = _('Programming error in Cinder: %(reason)s')
class NotAuthorized(CinderException):
    message = _("Not authorized.")
    code = 403
class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotAuthorized(CinderException):
    message = _("Not authorized for image %(image_id)s.")
class DriverNotInitialized(CinderException):
    message = _("Volume driver not ready.")
# 'Invalid' family: client-input errors, reported as HTTP 400 by default.
class Invalid(CinderException):
    message = _("Unacceptable parameters.")
    code = 400
class InvalidSnapshot(Invalid):
    message = _("Invalid snapshot: %(reason)s")
class InvalidVolumeAttachMode(Invalid):
    message = _("Invalid attaching mode '%(mode)s' for "
                "volume %(volume_id)s.")
class VolumeAttached(Invalid):
    message = _("Volume %(volume_id)s is still attached, detach volume first.")
class InvalidResults(Invalid):
    message = _("The results are invalid.")
class InvalidInput(Invalid):
    message = _("Invalid input received: %(reason)s")
class InvalidAvailabilityZone(Invalid):
    message = _("Availability zone '%(az)s' is invalid.")
class InvalidTypeAvailabilityZones(Invalid):
    message = _("Volume type is only supported in these availability zones: "
                "%(az)s")
class InvalidVolumeType(Invalid):
    message = _("Invalid volume type: %(reason)s")
class InvalidGroupType(Invalid):
    message = _("Invalid group type: %(reason)s")
class InvalidVolume(Invalid):
    message = _("Invalid volume: %(reason)s")
class ResourceConflict(Invalid):
    message = _("Resource conflict: %(reason)s")
    code = 409
class InvalidContentType(Invalid):
    message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
    message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = "%(err)s"
class InvalidAuthKey(Invalid):
    message = _("Invalid auth key: %(reason)s")
class InvalidConfigurationValue(Invalid):
    message = _('Value "%(value)s" is not valid for '
                'configuration option "%(option)s"')
class ServiceUnavailable(Invalid):
    message = _("Service is unavailable at this time.")
class UnavailableDuringUpgrade(Invalid):
    message = _('Cannot perform %(action)s during system upgrade.')
class ImageUnacceptable(Invalid):
    message = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageTooBig(Invalid):
    message = _("Image %(image_id)s size exceeded available "
                "disk space: %(reason)s")
class DeviceUnavailable(Invalid):
    message = _("The device in the path %(path)s is unavailable: %(reason)s")
class SnapshotUnavailable(VolumeBackendAPIException):
    message = _("The snapshot is unavailable: %(data)s")
class InvalidUUID(Invalid):
    message = _("Expected a UUID but received %(uuid)s.")
class InvalidAPIVersionString(Invalid):
    message = _("API Version String %(version)s is of invalid format. Must "
                "be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
    message = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
    message = _("Version %(req_ver)s is not supported by the API. Minimum "
                "is %(min_ver)s and maximum is %(max_ver)s.")
# Like InvalidParameterValue, the detail text is pre-built by the caller,
# so the template is deliberately not wrapped in _().
class ValidationError(Invalid):
    message = "%(detail)s"
class APIException(CinderException):
    message = _("Error while requesting %(service)s API.")
    def __init__(self, message=None, **kwargs):
        # Ensure the template's %(service)s always has a value.
        if 'service' not in kwargs:
            kwargs['service'] = 'unknown'
        super(APIException, self).__init__(message, **kwargs)
class APITimeout(APIException):
    message = _("Timeout while requesting %(service)s API.")
class RPCTimeout(CinderException):
    message = _("Timeout while requesting capabilities from backend "
                "%(service)s.")
    code = 502
# Base for "already exists" errors; subclasses supply their own message.
class Duplicate(CinderException):
    pass
class NotFound(CinderException):
    message = _("Resource could not be found.")
    code = 404
    # NOTE(review): 'safe' appears to mark the message as exposable to end
    # users — confirm against the API fault-handling middleware.
    safe = True
class GlanceStoreNotFound(NotFound):
    message = _("Store %(store_id)s not enabled in glance.")
class GlanceStoreReadOnly(Invalid):
    message = _("Store %(store_id)s is read-only in glance.")
# NotFound/Invalid variants for volumes, snapshots, metadata and types.
class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")
class MessageNotFound(NotFound):
    message = _("Message %(message_id)s could not be found.")
class VolumeAttachmentNotFound(NotFound):
    message = _("Volume attachment could not be found with "
                "filter: %(filter)s.")
class VolumeMetadataNotFound(NotFound):
    message = _("Volume %(volume_id)s has no metadata with "
                "key %(metadata_key)s.")
class InvalidVolumeMetadata(Invalid):
    message = _("Invalid metadata: %(reason)s")
class InvalidVolumeMetadataSize(Invalid):
    message = _("Invalid metadata size: %(reason)s")
class SnapshotMetadataNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s has no metadata with "
                "key %(metadata_key)s.")
class VolumeTypeNotFound(NotFound):
    message = _("Volume type %(volume_type_id)s could not be found.")
class VolumeTypeNotFoundByName(VolumeTypeNotFound):
    message = _("Volume type with name %(volume_type_name)s "
                "could not be found.")
class VolumeTypeAccessNotFound(NotFound):
    message = _("Volume type access not found for %(volume_type_id)s / "
                "%(project_id)s combination.")
class VolumeTypeExtraSpecsNotFound(NotFound):
    message = _("Volume Type %(volume_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")
class VolumeTypeInUse(CinderException):
    message = _("Volume Type %(volume_type_id)s deletion is not allowed with "
                "volumes present with the type.")
class VolumeTypeDeletionError(Invalid):
    message = _("The volume type %(volume_type_id)s is the only currently "
                "defined volume type and cannot be deleted.")
class VolumeTypeDefaultDeletionError(Invalid):
    message = _("The volume type %(volume_type_id)s is a default volume "
                "type and cannot be deleted.")
class VolumeTypeDefaultMisconfiguredError(CinderException):
    message = _("The request cannot be fulfilled as the default volume type "
                "%(volume_type_name)s cannot be found.")
class VolumeTypeProjectDefaultNotFound(NotFound):
    message = _("Default type for project %(project_id)s not found.")
class GroupTypeNotFound(NotFound):
    message = _("Group type %(group_type_id)s could not be found.")
class GroupTypeNotFoundByName(GroupTypeNotFound):
    message = _("Group type with name %(group_type_name)s "
                "could not be found.")
class GroupTypeAccessNotFound(NotFound):
    message = _("Group type access not found for %(group_type_id)s / "
                "%(project_id)s combination.")
class GroupTypeSpecsNotFound(NotFound):
    message = _("Group Type %(group_type_id)s has no specs with "
                "key %(group_specs_key)s.")
class GroupTypeInUse(CinderException):
    message = _("Group Type %(group_type_id)s deletion is not allowed with "
                "groups present with the type.")
class SnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")
class ServerNotFound(NotFound):
    message = _("Instance %(uuid)s could not be found.")
class VolumeSnapshotNotFound(NotFound):
    message = _("No snapshots found for volume %(volume_id)s.")
class VolumeIsBusy(CinderException):
    message = _("deleting volume %(volume_name)s that has snapshot")
class SnapshotIsBusy(CinderException):
    message = _("deleting snapshot %(snapshot_name)s that has "
                "dependent volumes")
class InvalidImageRef(Invalid):
    message = _("Invalid image href %(image_href)s.")
class InvalidSignatureImage(Invalid):
    message = _("Signature metadata is incomplete for image: "
                "%(image_id)s.")
class ImageSignatureVerificationException(CinderException):
    message = _("Failed to verify image signature, reason: %(reason)s.")
class ImageNotFound(NotFound):
    message = _("Image %(image_id)s could not be found.")
class ServiceNotFound(NotFound):
    def __init__(self, message=None, **kwargs):
        # Pick a template on the instance based on whether a 'host' kwarg
        # was supplied; CinderException.__init__ then formats self.message.
        if not message:
            if kwargs.get('host', None):
                self.message = _("Service %(service_id)s could not be "
                                 "found on host %(host)s.")
            else:
                self.message = _("Service %(service_id)s could not be found.")
        super(ServiceNotFound, self).__init__(message, **kwargs)
class ServiceTooOld(Invalid):
    message = _("Service is too old to fulfil this request.")
class WorkerNotFound(NotFound):
    message = _("Worker with %s could not be found.")
    def __init__(self, message=None, **kwargs):
        # Two-stage formatting: first render the lookup filters as
        # 'key=%(key)s' pairs into the plain %s placeholder, then let
        # CinderException substitute the actual kwarg values.
        keys_list = ('{0}=%({0})s'.format(key) for key in kwargs)
        placeholder = ', '.join(keys_list)
        self.message = self.message % placeholder
        super(WorkerNotFound, self).__init__(message, **kwargs)
# Worker/cluster/scheduler/quota lookup errors.
class WorkerExists(Duplicate):
    message = _("Worker for %(type)s %(id)s already exists.")
class CleanableInUse(Invalid):
    message = _('%(type)s with id %(id)s is already being cleaned up or '
                'another host has taken over it.')
class ClusterNotFound(NotFound):
    message = _('Cluster %(id)s could not be found.')
class ClusterHasHosts(Invalid):
    message = _("Cluster %(id)s still has hosts.")
class ClusterExists(Duplicate):
    message = _("Cluster %(name)s already exists.")
class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
    message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")
class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")
class QuotaNotFound(NotFound):
    message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")
class OverQuota(CinderException):
    message = _("Quota exceeded for resources: %(overs)s")
class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")
class VolumeTypeExists(Duplicate):
    message = _("Volume Type %(id)s already exists.")
class VolumeTypeAccessExists(Duplicate):
    message = _("Volume type access for %(volume_type_id)s / "
                "%(project_id)s combination already exists.")
class VolumeTypeEncryptionExists(Invalid):
    message = _("Volume type encryption for type %(type_id)s already exists.")
class VolumeTypeEncryptionNotFound(NotFound):
    message = _("Volume type encryption for type %(type_id)s does not exist.")
class GroupTypeExists(Duplicate):
    message = _("Group Type %(id)s already exists.")
class GroupTypeAccessExists(Duplicate):
    message = _("Group type access for %(group_type_id)s / "
                "%(project_id)s combination already exists.")
class GroupVolumeTypeMappingExists(Duplicate):
    message = _("Group volume type mapping for %(group_id)s / "
                "%(volume_type_id)s combination already exists.")
class MalformedRequestBody(CinderException):
    message = _("Malformed message body: %(reason)s")
class ConfigNotFound(NotFound):
    message = _("Could not find config at %(path)s")
class ParameterNotFound(NotFound):
    message = _("Could not find parameter %(param)s")
class NoValidBackend(CinderException):
    message = _("No valid backend was found. %(reason)s")
# Base for quota-exceeded errors: HTTP 413 with a Retry-After header.
class QuotaError(CinderException):
    message = _("Quota exceeded: code=%(code)s")
    code = 413
    headers = {'Retry-After': '0'}
    safe = True
class VolumeSizeExceedsAvailableQuota(QuotaError):
    message = _("Requested volume or snapshot exceeds allowed %(name)s "
                "quota. Requested %(requested)sG, quota is %(quota)sG and "
                "%(consumed)sG has been consumed.")
    def __init__(self, message=None, **kwargs):
        # Default the quota resource name so the template always renders.
        kwargs.setdefault('name', 'gigabytes')
        super(VolumeSizeExceedsAvailableQuota, self).__init__(
            message, **kwargs)
class VolumeSizeExceedsLimit(QuotaError):
    message = _("Requested volume size %(size)dG is larger than "
                "maximum allowed limit %(limit)dG.")
class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
    message = _("Requested backup exceeds allowed Backup gigabytes "
                "quota. Requested %(requested)sG, quota is %(quota)sG and "
                "%(consumed)sG has been consumed.")
class VolumeLimitExceeded(QuotaError):
    message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for "
                "quota '%(name)s'.")
    def __init__(self, message=None, **kwargs):
        # Default the quota resource name so the template always renders.
        kwargs.setdefault('name', 'volumes')
        super(VolumeLimitExceeded, self).__init__(message, **kwargs)
class SnapshotLimitExceeded(QuotaError):
    message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
class UnexpectedOverQuota(QuotaError):
    message = _("Unexpected over quota on %(name)s.")
class BackupLimitExceeded(QuotaError):
    message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
class ImageLimitExceeded(QuotaError):
    message = _("Image quota exceeded")
class VolumeTypeCreateFailed(CinderException):
    message = _("Cannot create volume_type with "
                "name %(name)s and specs %(extra_specs)s")
class VolumeTypeUpdateFailed(CinderException):
    message = _("Cannot update volume_type %(id)s")
class GroupTypeCreateFailed(CinderException):
    message = _("Cannot create group_type with "
                "name %(name)s and specs %(group_specs)s")
class GroupTypeUpdateFailed(CinderException):
    message = _("Cannot update group_type %(id)s")
class GroupLimitExceeded(QuotaError):
    message = _("Maximum number of groups allowed (%(allowed)d) exceeded")
# Driver command, glance-metadata, export/copy and backup operation errors.
class UnknownCmd(VolumeDriverException):
    message = _("Unknown or unsupported command %(cmd)s")
class MalformedResponse(VolumeDriverException):
    message = _("Malformed response to command %(cmd)s: %(reason)s")
class FailedCmdWithDump(VolumeDriverException):
    message = _("Operation failed with status=%(status)s. Full dump: %(data)s")
class InvalidConnectorException(VolumeDriverException):
    message = _("Connector doesn't have required information: %(missing)s")
class GlanceMetadataExists(Invalid):
    message = _("Glance metadata cannot be updated, key %(key)s"
                " exists for volume id %(volume_id)s")
class GlanceMetadataNotFound(NotFound):
    message = _("Glance metadata for volume/snapshot %(id)s cannot be found.")
class ImageDownloadFailed(CinderException):
    message = _("Failed to download image %(image_href)s, reason: %(reason)s")
class ExportFailure(Invalid):
    message = _("Failed to export for volume: %(reason)s")
class RemoveExportException(VolumeDriverException):
    message = _("Failed to remove export for volume %(volume)s: %(reason)s")
class MetadataUpdateFailure(Invalid):
    message = _("Failed to update metadata for volume: %(reason)s")
class MetadataCopyFailure(Invalid):
    message = _("Failed to copy metadata to volume: %(reason)s")
class InvalidMetadataType(Invalid):
    message = _("The type of metadata: %(metadata_type)s for volume/snapshot "
                "%(id)s is invalid.")
class ImageCopyFailure(Invalid):
    message = _("Failed to copy image to volume: %(reason)s")
class BackupInvalidCephArgs(BackupDriverException):
    message = _("Invalid Ceph args provided for backup rbd operation")
class BackupOperationError(Invalid):
    message = _("An error has occurred during backup operation")
class BackupMetadataUnsupportedVersion(BackupDriverException):
    message = _("Unsupported backup metadata version requested")
class BackupMetadataNotFound(NotFound):
    message = _("Backup %(backup_id)s has no metadata with "
                "key %(metadata_key)s.")
class VolumeMetadataBackupExists(BackupDriverException):
    message = _("Metadata backup already exists for this volume")
class BackupRBDOperationFailed(BackupDriverException):
    message = _("Backup RBD operation failed")
class EncryptedBackupOperationFailed(BackupDriverException):
    message = _("Backup operation of an encrypted volume failed.")
class BackupNotFound(NotFound):
    message = _("Backup %(backup_id)s could not be found.")
class InvalidBackup(Invalid):
    message = _("Invalid backup: %(reason)s")
class SwiftConnectionFailed(BackupDriverException):
    message = _("Connection to swift failed: %(reason)s")
class TransferNotFound(NotFound):
    message = _("Transfer %(transfer_id)s could not be found.")
class VolumeMigrationFailed(CinderException):
    message = _("Volume migration failed: %(reason)s")
class SSHInjectionThreat(CinderException):
    message = _("SSH command injection detected: %(command)s")
# QoS-specs, key-manager and manage-existing errors.
class QoSSpecsExists(Duplicate):
    message = _("QoS Specs %(specs_id)s already exists.")
class QoSSpecsCreateFailed(CinderException):
    message = _("Failed to create qos_specs: "
                "%(name)s with specs %(qos_specs)s.")
class QoSSpecsUpdateFailed(CinderException):
    message = _("Failed to update qos_specs: "
                "%(specs_id)s with specs %(qos_specs)s.")
class QoSSpecsNotFound(NotFound):
    message = _("No such QoS spec %(specs_id)s.")
class QoSSpecsAssociateFailed(CinderException):
    message = _("Failed to associate qos_specs: "
                "%(specs_id)s with type %(type_id)s.")
class QoSSpecsDisassociateFailed(CinderException):
    message = _("Failed to disassociate qos_specs: "
                "%(specs_id)s with type %(type_id)s.")
class QoSSpecsKeyNotFound(NotFound):
    message = _("QoS spec %(specs_id)s has no spec with "
                "key %(specs_key)s.")
class InvalidQoSSpecs(Invalid):
    message = _("Invalid qos specs: %(reason)s")
class QoSSpecsInUse(CinderException):
    message = _("QoS Specs %(specs_id)s is still associated with entities.")
class KeyManagerError(CinderException):
    message = _("key manager error: %(reason)s")
class ManageExistingInvalidReference(CinderException):
    message = _("Manage existing volume failed due to invalid backend "
                "reference %(existing_ref)s: %(reason)s")
class ManageExistingAlreadyManaged(CinderException):
    message = _("Unable to manage existing volume. "
                "Volume %(volume_ref)s already managed.")
class InvalidReplicationTarget(Invalid):
    message = _("Invalid Replication Target: %(reason)s")
class UnableToFailOver(CinderException):
    """Raised when failover to a replication target cannot proceed."""
    # NOTE: removed a stray ')' that previously followed %(reason)s and
    # produced an unbalanced parenthesis in the rendered message.
    message = _("Unable to failover to replication target: %(reason)s.")
class ReplicationError(CinderException):
    message = _("Volume %(volume_id)s replication "
                "error: %(reason)s")
class ReplicationGroupError(CinderException):
    message = _("Group %(group_id)s replication "
                "error: %(reason)s.")
class ManageExistingVolumeTypeMismatch(CinderException):
    message = _("Manage existing volume failed due to volume type mismatch: "
                "%(reason)s")
class ExtendVolumeError(CinderException):
    message = _("Error extending volume: %(reason)s")
# NOTE: inherits plain Exception, not CinderException, so 'message' is NOT
# auto-formatted with kwargs; callers must format it themselves.
class EvaluatorParseException(Exception):
    message = _("Error during evaluator parsing: %(reason)s")
class LockCreationFailed(CinderException):
    message = _('Unable to create lock. Coordination backend not started.')
# Re-export oslo.versionedobjects exceptions under the cinder namespace.
OrphanedObjectError = obj_exc.OrphanedObjectError
ObjectActionError = obj_exc.ObjectActionError
class CappedVersionUnknown(CinderException):
message = _("Unrecoverable Error: Versioned Objects in DB are capped to "
"unknown version %(version)s. Most likely your environment "
"contains only new services and you're trying to start an "
"older one. Use `cinder-manage service list` to check that "
"and upgrade this service.")
# NOTE(review): "Volume Group" below appears to refer to an LVM VG
# (vg_name), not a cinder generic group -- confirm against the LVM driver.
class VolumeGroupNotFound(CinderException):
    message = _('Unable to find Volume Group: %(vg_name)s')


class VolumeGroupCreationFailed(CinderException):
    message = _('Failed to create Volume Group: %(vg_name)s')


class VolumeNotDeactivated(CinderException):
    message = _('Volume %(name)s was not deactivated in time.')


class VolumeDeviceNotFound(CinderException):
    message = _('Volume device not found at %(device)s.')
# RemoteFS drivers
class RemoteFSException(VolumeDriverException):
    """Base error for RemoteFS-family drivers (NFS, etc.)."""
    message = _("Unknown RemoteFS exception")


class RemoteFSConcurrentRequest(RemoteFSException):
    message = _("A concurrent, possibly contradictory, request "
                "has been made.")


class RemoteFSNoSharesMounted(RemoteFSException):
    message = _("No mounted shares found")


class RemoteFSNoSuitableShareFound(RemoteFSException):
    message = _("There is no share which can host %(volume_size)sG")


class RemoteFSInvalidBackingFile(VolumeDriverException):
    message = _("File %(path)s has invalid backing file %(backing_file)s.")


# NFS driver
class NfsException(RemoteFSException):
    message = _("Unknown NFS exception")


class NfsNoSharesMounted(RemoteFSNoSharesMounted):
    message = _("No mounted NFS shares found")


class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
    message = _("There is no share which can host %(volume_size)sG")
# Fibre Channel Zone Manager
class ZoneManagerException(CinderException):
    message = _("Fibre Channel connection control failure: %(reason)s")


class FCZoneDriverException(CinderException):
    message = _("Fibre Channel Zone operation failed: %(reason)s")


class FCSanLookupServiceException(CinderException):
    message = _("Fibre Channel SAN Lookup failure: %(reason)s")


class ZoneManagerNotInitialized(CinderException):
    message = _("Fibre Channel Zone Manager not initialized")
# ConsistencyGroup
class ConsistencyGroupNotFound(NotFound):
    message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.")


class InvalidConsistencyGroup(Invalid):
    message = _("Invalid ConsistencyGroup: %(reason)s")


# Group (generic volume groups, the successor of consistency groups)
class GroupNotFound(NotFound):
    message = _("Group %(group_id)s could not be found.")


class InvalidGroup(Invalid):
    message = _("Invalid Group: %(reason)s")


class InvalidGroupStatus(Invalid):
    message = _("Invalid Group Status: %(reason)s")


# CgSnapshot
class CgSnapshotNotFound(NotFound):
    message = _("CgSnapshot %(cgsnapshot_id)s could not be found.")


class InvalidCgSnapshot(Invalid):
    message = _("Invalid CgSnapshot: %(reason)s")


# GroupSnapshot
class GroupSnapshotNotFound(NotFound):
    message = _("GroupSnapshot %(group_snapshot_id)s could not be found.")


class InvalidGroupSnapshot(Invalid):
    message = _("Invalid GroupSnapshot: %(reason)s")


class InvalidGroupSnapshotStatus(Invalid):
    message = _("Invalid GroupSnapshot Status: %(reason)s")
# Target drivers
class ISCSITargetCreateFailed(CinderException):
    message = _("Failed to create iscsi target for volume %(volume_id)s.")


class ISCSITargetRemoveFailed(CinderException):
    message = _("Failed to remove iscsi target for volume %(volume_id)s.")


class ISCSITargetAttachFailed(CinderException):
    message = _("Failed to attach iSCSI target for volume %(volume_id)s.")


class ISCSITargetDetachFailed(CinderException):
    message = _("Failed to detach iSCSI target for volume %(volume_id)s.")


class TargetUpdateFailed(CinderException):
    message = _("Failed to update target for volume %(volume_id)s.")


class ISCSITargetHelperCommandFailed(CinderException):
    # NOTE(review): not wrapped in _() -- the message carries a
    # pre-formatted helper error verbatim; confirm translation is
    # deliberately skipped here.
    message = "%(error_message)s"
class BadHTTPResponseStatus(VolumeDriverException):
    message = _("Bad HTTP response status %(status)s")


class BadResetResourceStatus(CinderException):
    message = _("Bad reset resource status : %(reason)s")


class MetadataAbsent(CinderException):
    message = _("There is no metadata in DB object.")


class NotSupportedOperation(Invalid):
    message = _("Operation not supported: %(operation)s.")
    # HTTP status code the API layer returns for this error.
    code = 405


class AttachmentSpecsNotFound(NotFound):
    message = _("Attachment %(attachment_id)s has no "
                "key %(specs_key)s.")


class InvalidName(Invalid):
    message = _("An invalid 'name' value was provided. %(reason)s")


class ServiceUserTokenNoAuth(CinderException):
    message = _("The [service_user] send_service_user_token option was "
                "requested, but no service auth could be loaded. Please check "
                "the [service_user] configuration section.")


class RekeyNotSupported(CinderException):
    message = _("Rekey not supported.")


class ImageCompressionNotAllowed(CinderException):
    message = _("Image compression upload disallowed, but container_format "
                "is compressed")


class ImageConversionNotAllowed(CinderException):
    message = _("Image Conversion disallowed for image %(image_id)s: "
                "%(reason)s")


class CinderAcceleratorError(CinderException):
    message = _("Cinder accelerator %(accelerator)s encountered an error "
                "while compressing/decompressing image.\n"
                "Command %(cmd)s execution failed.\n"
                "%(description)s\n"
                "Reason: %(reason)s")


class SnapshotLimitReached(CinderException):
    message = _("Exceeded the configured limit of "
                "%(set_limit)s snapshots per volume.")


class DriverInitiatorDataExists(Duplicate):
    message = _(
        "Driver initiator data for initiator '%(initiator)s' and backend "
        "'%(namespace)s' with key '%(key)s' already exists."
    )


class RequirementMissing(CinderException):
    message = _('Requirement %(req)s is not installed.')


class ConflictNovaUsingAttachment(CinderException):
    message = _("Detach volume from instance %(instance_id)s using the "
                "Compute API")
    # 409 Conflict; `safe` presumably marks the message as exposable to
    # end users -- verify against the CinderException base class.
    code = 409
    safe = True
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/flow_utils.py 0000664 0000000 0000000 00000006404 15131732575 0022401 0 ustar 00root root 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from typing import Any, Optional
from oslo_log import log as logging
# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow
from taskflow import formatters
from taskflow.listeners import base
from taskflow.listeners import logging as logging_listener
from taskflow import task
from cinder import exception
LOG = logging.getLogger(__name__)
def _make_task_name(cls, addons: Optional[list[str]] = None) -> str:
"""Makes a pretty name for a task class."""
base_name = ".".join([cls.__module__, cls.__name__])
extra = ''
if addons:
extra = ';%s' % (", ".join([str(a) for a in addons]))
return base_name + extra
class CinderTask(task.Task):
    """Root task class for all cinder tasks.

    The task name is derived automatically from the module and class of
    the concrete task implementation.
    """

    def __init__(self,
                 addons: Optional[list[str]] = None,
                 **kwargs: Any) -> None:
        super().__init__(self.make_name(addons), **kwargs)

    @classmethod
    def make_name(cls, addons: Optional[list[str]] = None) -> str:
        # Delegate to the module-level helper so the naming scheme is
        # shared with any other callers.
        return _make_task_name(cls, addons)
class SpecialFormatter(formatters.FailureFormatter):
    """Failure formatter that omits tracebacks for expected failures."""

    #: Exception is an excepted case, don't include traceback in log if fails.
    _NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError)

    def __init__(self, engine):
        super().__init__(engine)

    def format(self, fail, atom_matcher):
        # Unexpected failures keep the default (traceback-including) format.
        if fail.check(*self._NO_TRACE_EXCEPTIONS) is None:
            return super().format(fail, atom_matcher)
        # Expected failure: no exc_info, compact details without traceback.
        details = '%s%s' % (os.linesep, fail.pformat(traceback=False))
        return (None, details)
class DynamicLogListener(logging_listener.DynamicLoggingListener):
    """Listener attached to taskflow engines while they are running.

    Exposes the actions happening inside a taskflow engine, which is
    useful for developers when debugging and for operations folks when
    monitoring and tracking resource actions.
    """

    def __init__(self, engine,
                 task_listen_for=base.DEFAULT_LISTEN_FOR,
                 flow_listen_for=base.DEFAULT_LISTEN_FOR,
                 retry_listen_for=base.DEFAULT_LISTEN_FOR,
                 logger=LOG):
        # Route failure formatting through SpecialFormatter so expected
        # exceptions are logged without tracebacks.
        super().__init__(
            engine,
            task_listen_for=task_listen_for,
            flow_listen_for=flow_listen_for,
            retry_listen_for=retry_listen_for,
            log=logger,
            fail_formatter=SpecialFormatter(engine))
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/group/ 0000775 0000000 0000000 00000000000 15131732575 0020770 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/group/__init__.py 0000664 0000000 0000000 00000001641 15131732575 0023103 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from cinder.group import ' elsewhere.
from oslo_utils import importutils
from cinder.common import config
CONF = config.CONF
# Resolve the configured group API implementation at import time so
# deployments can substitute their own class via ``group_api_class``.
API = importutils.import_class(
    CONF.group_api_class)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/group/api.py 0000664 0000000 0000000 00000146123 15131732575 0022122 0 ustar 00root root 0000000 0000000 # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to groups.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from cinder import db
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields as c_fields
from cinder.policies import group_actions as gp_action_policy
from cinder.policies import group_snapshot_actions as gsnap_action_policy
from cinder.policies import group_snapshots as gsnap_policy
from cinder.policies import groups as group_policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_types
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
GROUP_QUOTAS = quota.GROUP_QUOTAS

# Volume statuses from which a volume may be removed from a group.
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')

# Volume statuses from which a volume may be added to a group.
VALID_ADD_VOL_TO_GROUP_STATUS = (
    'available',
    'in-use')
class API(base.Base):
"""API for interacting with the volume manager for groups."""
    def __init__(self):
        # RPC clients for the scheduler and volume services, plus the
        # volume API, used to create/update/delete group resources on
        # the appropriate backend.
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        self.volume_api = volume_api.API()
        super().__init__()
    def _extract_availability_zone(self, availability_zone):
        """Resolve and validate the availability zone for a new group.

        When no zone is given, falls back to ``default_availability_zone``
        then ``storage_availability_zone``.  An unknown zone either falls
        back again (if ``allow_availability_zone_fallback`` is set) or is
        rejected.

        :returns: the validated availability zone name
        :raises exception.InvalidInput: if the zone is unknown and
            fallback is disabled
        """
        raw_zones = self.volume_api.list_availability_zones(enable_cache=True)
        availability_zones = set([az['name'] for az in raw_zones])
        if CONF.storage_availability_zone:
            availability_zones.add(CONF.storage_availability_zone)
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone
        if availability_zone not in availability_zones:
            if CONF.allow_availability_zone_fallback:
                original_az = availability_zone
                availability_zone = (
                    CONF.default_availability_zone or
                    CONF.storage_availability_zone)
                LOG.warning("Availability zone '%(s_az)s' not found, falling "
                            "back to '%(s_fallback_az)s'.",
                            {'s_az': original_az,
                             's_fallback_az': availability_zone})
            else:
                msg = _("Availability zone '%(s_az)s' is invalid.")
                msg = msg % {'s_az': availability_zone}
                raise exception.InvalidInput(reason=msg)
        return availability_zone
def _update_volumes_host(self, context, group):
volumes = objects.VolumeList.get_all_by_generic_group(context,
group.id)
for vol in volumes:
# Update the host field for the volume.
vol.host = group.host
vol.cluster_name = group.cluster_name
vol.save()
def create(self, context, name, description, group_type,
volume_types, availability_zone=None):
context.authorize(group_policy.CREATE_POLICY)
req_volume_types = []
# NOTE: Admin context is required to get extra_specs of volume_types.
req_volume_types = (self.db.volume_types_get_by_name_or_id(
context.elevated(), volume_types))
if not uuidutils.is_uuid_like(group_type):
req_group_type = self.db.group_type_get_by_name(context,
group_type)
else:
try:
req_group_type = self.db.group_type_get(context, group_type)
except exception.GroupTypeNotFound:
# check again if we get this group type by uuid-like name
try:
req_group_type = self.db.group_type_get_by_name(
context, group_type)
except exception.GroupTypeNotFoundByName:
raise exception.GroupTypeNotFound(group_type_id=group_type)
availability_zone = self._extract_availability_zone(availability_zone)
kwargs = {'user_id': context.user_id,
'project_id': context.project_id,
'availability_zone': availability_zone,
'status': c_fields.GroupStatus.CREATING,
'name': name,
'description': description,
'volume_type_ids': [t['id'] for t in req_volume_types],
'group_type_id': req_group_type['id'],
'replication_status': c_fields.ReplicationStatus.DISABLED}
try:
reservations = GROUP_QUOTAS.reserve(context,
project_id=context.project_id,
groups=1)
except exception.OverQuota as e:
quota_utils.process_reserve_over_quota(context, e,
resource='groups')
group = None
try:
group = objects.Group(context=context, **kwargs)
group.create()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error occurred when creating group"
" %s.", name)
GROUP_QUOTAS.rollback(context, reservations)
request_spec_list = []
filter_properties_list = []
for req_volume_type in req_volume_types:
request_spec = {'volume_type': req_volume_type.copy(),
'group_id': group.id}
filter_properties = {}
request_spec_list.append(request_spec)
filter_properties_list.append(filter_properties)
group_spec = {'group_type': req_group_type.copy(),
'group_id': group.id}
group_filter_properties = {}
# Update quota for groups
GROUP_QUOTAS.commit(context, reservations)
self._cast_create_group(context, group,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
return group
    def create_from_src(self, context, name, description=None,
                        group_snapshot_id=None, source_group_id=None):
        """Create a group from a group snapshot or from another group.

        Exactly one of *group_snapshot_id* / *source_group_id* is expected;
        it determines the group type, volume types and total size used for
        quota and host-capacity validation.

        :returns: the new objects.Group
        :raises exception.InvalidGroup: if no valid host can accommodate
            the new group
        """
        context.authorize(group_policy.CREATE_POLICY)
        # Populate group_type_id and volume_type_ids
        group_type_id = None
        volume_type_ids = []
        size = 0
        if group_snapshot_id:
            grp_snap = self.get_group_snapshot(context, group_snapshot_id)
            group_type_id = grp_snap.group_type_id
            grp_snap_src_grp = self.get(context, grp_snap.group_id)
            volume_type_ids = [vt.id for vt in grp_snap_src_grp.volume_types]
            snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                context, group_snapshot_id)
            size = sum(s.volume.size for s in snapshots)
        elif source_group_id:
            source_group = self.get(context, source_group_id)
            group_type_id = source_group.group_type_id
            volume_type_ids = [vt.id for vt in source_group.volume_types]
            source_vols = objects.VolumeList.get_all_by_generic_group(
                context, source_group.id)
            size = sum(v.size for v in source_vols)
        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': c_fields.GroupStatus.CREATING,
            'name': name,
            'description': description,
            'group_snapshot_id': group_snapshot_id,
            'source_group_id': source_group_id,
            'group_type_id': group_type_id,
            'volume_type_ids': volume_type_ids,
            'replication_status': c_fields.ReplicationStatus.DISABLED
        }
        try:
            reservations = GROUP_QUOTAS.reserve(context,
                                                project_id=context.project_id,
                                                groups=1)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context, e,
                                                   resource='groups')
        group = None
        try:
            group = objects.Group(context=context, **kwargs)
            group.create(group_snapshot_id=group_snapshot_id,
                         source_group_id=source_group_id)
        except exception.GroupNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error("Source Group %(source_group)s not found when "
                          "creating group %(group)s from source.",
                          {'group': name, 'source_group': source_group_id})
                GROUP_QUOTAS.rollback(context, reservations)
        except exception.GroupSnapshotNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error("Group snapshot %(group_snap)s not found when "
                          "creating group %(group)s from source.",
                          {'group': name, 'group_snap': group_snapshot_id})
                GROUP_QUOTAS.rollback(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Error occurred when creating group"
                          " %(group)s from group_snapshot %(grp_snap)s.",
                          {'group': name, 'grp_snap': group_snapshot_id})
                GROUP_QUOTAS.rollback(context, reservations)
        # Update quota for groups
        GROUP_QUOTAS.commit(context, reservations)
        # NOTE(tommylikehu): We wrap the size inside of the attribute
        # 'volume_properties' as scheduler's filter logic are all designed
        # based on this attribute.
        kwargs = {'group_id': group.id,
                  'volume_properties': objects.VolumeProperties(size=size)}
        host = group.resource_backend
        if not host or not self.scheduler_rpcapi.validate_host_capacity(
                context, host, objects.RequestSpec(**kwargs)):
            msg = _("No valid host to create group %s.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)
        group.assert_not_frozen()
        if group_snapshot_id:
            self._create_group_from_group_snapshot(context, group,
                                                   group_snapshot_id)
        elif source_group_id:
            self._create_group_from_source_group(context, group,
                                                 source_group_id)
        return group
    def _create_group_from_group_snapshot(self, context, group,
                                          group_snapshot_id):
        """Create volume DB entries for a group built from a group snapshot.

        One volume entry is created per snapshot in the group snapshot
        (the backend volumes are created later by the volume manager).
        On failure all created volumes are deleted (reverting quota) and
        the group is destroyed before the exception is re-raised.
        """
        try:
            group_snapshot = objects.GroupSnapshot.get_by_id(
                context, group_snapshot_id)
            snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                context, group_snapshot.id)
            if not snapshots:
                msg = _("Group snapshot is empty. No group will be created.")
                raise exception.InvalidGroup(reason=msg)
            for snapshot in snapshots:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['group_snapshot'] = group_snapshot
                kwargs['group'] = group
                kwargs['snapshot'] = snapshot
                volume_type_id = snapshot.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = (
                        objects.VolumeType.get_by_name_or_id(
                            context, volume_type_id))
                    # Create group volume_type mapping entries
                    try:
                        db.group_volume_type_mapping_create(context, group.id,
                                                            volume_type_id)
                    except exception.GroupVolumeTypeMappingExists:
                        # Only need to create one group volume_type mapping
                        # entry for the same combination, skipping.
                        LOG.info("A mapping entry already exists for group"
                                 " %(grp)s and volume type %(vol_type)s. "
                                 "Do not need to create again.",
                                 {'grp': group.id,
                                  'vol_type': volume_type_id})
                # Since group snapshot is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           snapshot.volume_size,
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Error occurred when creating volume "
                                  "entry from snapshot in the process of "
                                  "creating group %(group)s "
                                  "from group snapshot %(group_snap)s.",
                                  {'group': group.id,
                                   'group_snap': group_snapshot.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    volumes = objects.VolumeList.get_all_by_generic_group(
                        context, group.id)
                    for vol in volumes:
                        # NOTE(tommylikehu): `delete` is used here in order to
                        # revert consumed quota.
                        self.volume_api.delete(context, vol)
                    group.destroy()
                finally:
                    LOG.error("Error occurred when creating group "
                              "%(group)s from group snapshot %(group_snap)s.",
                              {'group': group.id,
                               'group_snap': group_snapshot.id})
        self._update_volumes_host(context, group)
        self.volume_rpcapi.create_group_from_src(
            context, group, group_snapshot)
    def _create_group_from_source_group(self, context, group,
                                        source_group_id):
        """Create volume DB entries for a group cloned from another group.

        Mirrors :meth:`_create_group_from_group_snapshot`, but clones each
        volume of the source group instead of restoring snapshots.  On
        failure all created volumes are deleted (reverting quota) and the
        group is destroyed before re-raising.
        """
        try:
            source_group = objects.Group.get_by_id(context,
                                                   source_group_id)
            source_vols = objects.VolumeList.get_all_by_generic_group(
                context, source_group.id)
            if not source_vols:
                msg = _("Source Group is empty. No group "
                        "will be created.")
                raise exception.InvalidGroup(reason=msg)
            for source_vol in source_vols:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['source_group'] = source_group
                kwargs['group'] = group
                kwargs['source_volume'] = source_vol
                volume_type_id = source_vol.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = (
                        objects.VolumeType.get_by_name_or_id(
                            context, volume_type_id))
                    # Create group volume_type mapping entries
                    try:
                        db.group_volume_type_mapping_create(context, group.id,
                                                            volume_type_id)
                    except exception.GroupVolumeTypeMappingExists:
                        # Only need to create one group volume_type mapping
                        # entry for the same combination, skipping.
                        LOG.info("A mapping entry already exists for group"
                                 " %(grp)s and volume type %(vol_type)s. "
                                 "Do not need to create again.",
                                 {'grp': group.id,
                                  'vol_type': volume_type_id})
                # Since source_group is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           source_vol.size,
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error("Error occurred when creating cloned "
                                  "volume in the process of creating "
                                  "group %(group)s from "
                                  "source group %(source_group)s.",
                                  {'group': group.id,
                                   'source_group': source_group.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    volumes = objects.VolumeList.get_all_by_generic_group(
                        context, group.id)
                    for vol in volumes:
                        # NOTE(tommylikehu): `delete` is used here in order to
                        # revert consumed quota.
                        self.volume_api.delete(context, vol)
                    group.destroy()
                finally:
                    LOG.error("Error occurred when creating "
                              "group %(group)s from source group "
                              "%(source_group)s.",
                              {'group': group.id,
                               'source_group': source_group.id})
        self._update_volumes_host(context, group)
        self.volume_rpcapi.create_group_from_src(context, group,
                                                 None, source_group)
    def _cast_create_group(self, context, group,
                           group_spec,
                           request_spec_list,
                           group_filter_properties,
                           filter_properties_list):
        """Fill in scheduler specs and cast the create to the scheduler.

        Augments each per-volume-type request spec (and the group spec)
        with volume properties and QoS specs.  If spec building fails the
        group DB record is destroyed before re-raising.
        """
        try:
            for request_spec in request_spec_list:
                volume_type = request_spec.get('volume_type')
                volume_type_id = None
                if volume_type:
                    volume_type_id = volume_type.get('id')
                specs = {}
                if volume_type_id:
                    qos_specs = volume_types.get_volume_type_qos_specs(
                        volume_type_id)
                    specs = qos_specs['qos_specs']
                if not specs:
                    # to make sure we don't pass empty dict
                    specs = None
                volume_properties = {
                    'size': 0,  # Need to populate size for the scheduler
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': 'creating',
                    'attach_status': 'detached',
                    'encryption_key_id': request_spec.get('encryption_key_id'),
                    'display_description': request_spec.get('description'),
                    'display_name': request_spec.get('name'),
                    'volume_type_id': volume_type_id,
                    'group_type_id': group.group_type_id,
                    'availability_zone': group.availability_zone
                }
                request_spec['volume_properties'] = volume_properties
                request_spec['qos_specs'] = specs
            group_properties = {
                'size': 0,  # Need to populate size for the scheduler
                'user_id': context.user_id,
                'project_id': context.project_id,
                'status': 'creating',
                'display_description': group_spec.get('description'),
                'display_name': group_spec.get('name'),
                'group_type_id': group.group_type_id,
            }
            group_spec['volume_properties'] = group_properties
            group_spec['qos_specs'] = None
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error("Error occurred when building request spec "
                              "list for group %s.", group.id)
        # Cast to the scheduler and let it handle whatever is needed
        # to select the target host for this group.
        self.scheduler_rpcapi.create_group(
            context,
            group,
            group_spec=group_spec,
            request_spec_list=request_spec_list,
            group_filter_properties=group_filter_properties,
            filter_properties_list=filter_properties_list)
    def update_quota(self, context, group, num, project_id=None):
        """Adjust group quota usage by *num* (may be negative).

        On any failure the group is destroyed before the exception is
        re-raised, since quota no longer reflects the group's existence.
        """
        reserve_opts = {'groups': num}
        try:
            reservations = GROUP_QUOTAS.reserve(context,
                                                project_id=project_id,
                                                **reserve_opts)
            if reservations:
                GROUP_QUOTAS.commit(context, reservations)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                    if isinstance(e, exception.OverQuota):
                        quota_utils.process_reserve_over_quota(
                            context, e, resource='groups')
                finally:
                    LOG.error("Failed to update quota for group %s.", group.id)
    def delete(self, context, group, delete_volumes=False):
        """Delete a group, optionally together with its volumes.

        A host-less group is removed from the DB directly (with quota
        rollback).  Otherwise status, snapshots, volume attachment state
        and volume snapshots are validated before a conditional update
        moves the group to ``deleting`` and the volume service is asked
        to do the actual deletion.

        :param delete_volumes: also delete the group's volumes and skip
            the status precondition
        :raises exception.InvalidGroup: if any precondition fails
        """
        context.authorize(gp_action_policy.DELETE_POLICY, target_obj=group)
        if not group.host:
            self.update_quota(context, group, -1, group.project_id)
            LOG.debug("No host for group %s. Deleting from "
                      "the database.", group.id)
            group.destroy()
            return
        group.assert_not_frozen()
        if not delete_volumes and group.status not in (
                [c_fields.GroupStatus.AVAILABLE,
                 c_fields.GroupStatus.ERROR]):
            msg = _("Group status must be available or error, "
                    "but current status is: %s") % group.status
            raise exception.InvalidGroup(reason=msg)
        # NOTE(tommylikehu): Admin context is required to load group snapshots.
        with group.obj_as_admin():
            if group.group_snapshots:
                raise exception.InvalidGroup(
                    reason=_("Group has existing snapshots."))
        # TODO(smcginnis): Add conditional update handling for volumes
        # Should probably utilize the volume_api.delete code to handle
        # cascade snapshot deletion and force delete.
        volumes = self.db.volume_get_all_by_generic_group(context.elevated(),
                                                          group.id)
        if volumes and not delete_volumes:
            msg = (_("Group %s still contains volumes. "
                     "The delete-volumes flag is required to delete it.")
                   % group.id)
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)
        volumes_model_update = []
        for volume in volumes:
            if volume['attach_status'] == "attached":
                msg = _("Volume in group %s is attached. "
                        "Need to detach first.") % group.id
                LOG.error(msg)
                raise exception.InvalidGroup(reason=msg)
            snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                                volume['id'])
            if snapshots:
                msg = _("Volume in group still has "
                        "dependent snapshots.")
                LOG.error(msg)
                raise exception.InvalidGroup(reason=msg)
            volumes_model_update.append({'id': volume['id'],
                                         'status': 'deleting'})
        self.db.volumes_update(context, volumes_model_update)
        if delete_volumes:
            # We're overloading the term "delete_volumes" somewhat to also
            # mean to delete the group regardless of the state.
            expected = {}
        else:
            expected = {'status': (c_fields.GroupStatus.AVAILABLE,
                                   c_fields.GroupStatus.ERROR)}
        filters = [~db.group_has_group_snapshot_filter(),
                   ~db.group_has_volumes_filter(
                       attached_or_with_snapshots=delete_volumes),
                   ~db.group_creating_from_src(group_id=group.id)]
        values = {'status': c_fields.GroupStatus.DELETING}
        # Atomic state transition; guards against races with concurrent
        # snapshot/volume/group-from-source operations.
        if not group.conditional_update(values, expected, filters):
            if delete_volumes:
                reason = _('Group status must be available or error and must '
                           'not have dependent group snapshots')
            else:
                reason = _('Group must not have attached volumes, volumes '
                           'with snapshots, or dependent group snapshots')
            msg = _('Cannot delete group %(id)s. %(reason)s, and '
                    'it cannot be the source for an ongoing group or group '
                    'snapshot creation.') % {
                'id': group.id, 'reason': reason}
            raise exception.InvalidGroup(reason=msg)
        self.volume_rpcapi.delete_group(context, group)
    def update(self, context, group, name, description,
               add_volumes, remove_volumes):
        """Update a group's name/description and membership.

        *add_volumes* / *remove_volumes* are comma-separated UUID strings.
        Name/description changes are applied directly in the DB; volume
        membership changes are sent to the volume service via RPC.

        :raises exception.InvalidVolume: if a UUID appears in both lists
            or fails membership validation
        :raises exception.InvalidGroup: if nothing valid was provided or
            the group is not in ``available`` status
        """
        context.authorize(group_policy.UPDATE_POLICY, target_obj=group)
        # Validate name.
        if name == group.name:
            name = None
        # Validate description.
        if description == group.description:
            description = None
        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')
        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)
        volumes = self.db.volume_get_all_by_generic_group(context, group.id)
        # Validate volumes in add_volumes and remove_volumes.
        add_volumes_new = ""
        remove_volumes_new = ""
        if add_volumes_list:
            add_volumes_new = self._validate_add_volumes(
                context, volumes, add_volumes_list, group)
        if remove_volumes_list:
            remove_volumes_new = self._validate_remove_volumes(
                volumes, remove_volumes_list, group)
        if (name is None and description is None and not add_volumes_new and
                not remove_volumes_new):
            msg = (_("Cannot update group %(group_id)s "
                     "because no valid name, description, add_volumes, "
                     "or remove_volumes were provided.") %
                   {'group_id': group.id})
            raise exception.InvalidGroup(reason=msg)
        expected = {}
        fields = {'updated_at': timeutils.utcnow()}
        # Update name and description in db now. No need to
        # send them over through an RPC call.
        if name is not None:
            fields['name'] = name
        if description is not None:
            fields['description'] = description
        if not add_volumes_new and not remove_volumes_new:
            # Only update name or description. Set status to available.
            fields['status'] = c_fields.GroupStatus.AVAILABLE
        else:
            expected['status'] = c_fields.GroupStatus.AVAILABLE
            fields['status'] = c_fields.GroupStatus.UPDATING
        if not group.conditional_update(fields, expected):
            msg = _("Group status must be available.")
            raise exception.InvalidGroup(reason=msg)
        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.volume_rpcapi.update_group(
                context, group,
                add_volumes=add_volumes_new,
                remove_volumes=remove_volumes_new)
def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
    """Validate the volumes requested for removal from a group.

    :param volumes: volumes currently in the group
    :param remove_volumes_list: list of volume UUIDs requested removed
    :param group: the group being updated
    :returns: comma-separated string of removable volume UUIDs
    :raises InvalidVolume: if a volume is in an invalid state or is not
        a member of the group
    """
    removable_ids = []
    for volume in volumes:
        if volume['id'] not in remove_volumes_list:
            continue
        if volume['status'] not in VALID_REMOVE_VOL_FROM_GROUP_STATUS:
            msg = (_("Cannot remove volume %(volume_id)s from "
                     "group %(group_id)s because volume "
                     "is in an invalid state: %(status)s. Valid "
                     "states are: %(valid)s.") %
                   {'volume_id': volume['id'],
                    'group_id': group.id,
                    'status': volume['status'],
                    'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
            raise exception.InvalidVolume(reason=msg)
        # Volume currently in group. It will be removed from group.
        removable_ids.append(volume['id'])
    remove_volumes_new = ",".join(removable_ids)
    # Every requested UUID must belong to the group.
    for rem_vol in remove_volumes_list:
        if rem_vol not in remove_volumes_new:
            msg = (_("Cannot remove volume %(volume_id)s from "
                     "group %(group_id)s because it "
                     "is not in the group.") %
                   {'volume_id': rem_vol,
                    'group_id': group.id})
            raise exception.InvalidVolume(reason=msg)
    return remove_volumes_new
def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
    """Validate that every requested volume can be added to *group*.

    :param context: request context
    :param volumes: volumes already in the group
    :param add_volumes_list: list of volume UUIDs to add (mutated:
        UUIDs already in the group are dropped from it)
    :param group: the group being updated
    :returns: comma-separated string of volume UUIDs to add
    :raises InvalidVolume: if any volume cannot be added
    """
    add_volumes_new = ""
    for volume in volumes:
        if volume['id'] in add_volumes_list:
            # Volume already in group. Remove from add_volumes.
            add_volumes_list.remove(volume['id'])
    for add_vol in add_volumes_list:
        try:
            add_vol_ref = objects.Volume.get_by_id(context, add_vol)
        except exception.VolumeNotFound:
            msg = (_("Cannot add volume %(volume_id)s to "
                     "group %(group_id)s because volume cannot be "
                     "found.") %
                   {'volume_id': add_vol,
                    'group_id': group.id})
            raise exception.InvalidVolume(reason=msg)
        orig_group = add_vol_ref.group_id
        if orig_group:
            # If volume to be added is already in the group to be updated,
            # it should have been removed from the add_volumes_list in the
            # beginning of this function. If we are here, it means it is
            # in a different group.
            msg = (_("Cannot add volume %(volume_id)s to group "
                     "%(group_id)s because it is already in "
                     "group %(orig_group)s.") %
                   {'volume_id': add_vol_ref.id,
                    'group_id': group.id,
                    'orig_group': orig_group})
            raise exception.InvalidVolume(reason=msg)
        if add_vol_ref:
            if add_vol_ref.project_id != group.project_id:
                msg = (_("Cannot add volume %(volume_id)s to group "
                         "%(group_id)s as they belong to different "
                         "projects.") %
                       {'volume_id': add_vol_ref.id,
                        'group_id': group.id})
                raise exception.InvalidVolume(reason=msg)
            add_vol_type_id = add_vol_ref.volume_type_id
            if not add_vol_type_id:
                msg = (_("Cannot add volume %(volume_id)s to group "
                         "%(group_id)s because it has no volume "
                         "type.") %
                       {'volume_id': add_vol_ref.id,
                        'group_id': group.id})
                raise exception.InvalidVolume(reason=msg)
            vol_type_ids = [v_type.id for v_type in group.volume_types]
            if add_vol_type_id not in vol_type_ids:
                msg = (_("Cannot add volume %(volume_id)s to group "
                         "%(group_id)s because volume type "
                         "%(volume_type)s is not supported by the "
                         "group.") %
                       {'volume_id': add_vol_ref.id,
                        'group_id': group.id,
                        'volume_type': add_vol_type_id})
                raise exception.InvalidVolume(reason=msg)
            if (add_vol_ref.status not in
                    VALID_ADD_VOL_TO_GROUP_STATUS):
                msg = (_("Cannot add volume %(volume_id)s to group "
                         "%(group_id)s because volume is in an "
                         "invalid state: %(status)s. Valid states are: "
                         "%(valid)s.") %
                       {'volume_id': add_vol_ref.id,
                        'group_id': group.id,
                        'status': add_vol_ref.status,
                        'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
                raise exception.InvalidVolume(reason=msg)
            # group.resource_backend and add_vol_ref.resource_backend are
            # in this format like 'host@backend#pool' in a non-HA
            # deployment and will contain cluster_name in
            # A/A HA deployment.
            vol_host = volume_utils.extract_host(
                add_vol_ref.resource_backend)
            group_host = volume_utils.extract_host(group.resource_backend)
            if group_host != vol_host:
                raise exception.InvalidVolume(
                    reason=_("Volume is not local to this node."))
            # Volume exists. It will be added to CG.
            if add_volumes_new:
                add_volumes_new += ","
            add_volumes_new += add_vol_ref.id
        else:
            # Defensive branch: get_by_id either returns a volume or
            # raises VolumeNotFound, so this is rarely reached.
            # BUG FIX: the message previously used add_vol_ref.id, but
            # this branch only runs when add_vol_ref is falsy, so that
            # would raise AttributeError instead of the intended
            # InvalidVolume. Use the requested UUID instead.
            msg = (_("Cannot add volume %(volume_id)s to group "
                     "%(group_id)s because volume does not exist.") %
                   {'volume_id': add_vol,
                    'group_id': group.id})
            raise exception.InvalidVolume(reason=msg)
    return add_volumes_new
def get(self, context, group_id):
    """Return the group identified by *group_id* after a policy check."""
    grp = objects.Group.get_by_id(context, group_id)
    context.authorize(group_policy.GET_POLICY, target_obj=grp)
    return grp
def get_all(self, context, filters=None, marker=None, limit=None,
            offset=None, sort_keys=None, sort_dirs=None):
    """List groups visible to the caller, honoring search filters."""
    context.authorize(group_policy.GET_ALL_POLICY)
    filters = {} if filters is None else filters
    if filters:
        LOG.debug("Searching by: %s", filters)
    if context.is_admin and 'all_tenants' in filters:
        # Admins may list groups across all projects; the marker key
        # itself must not reach the DB filter layer.
        del filters['all_tenants']
        return objects.GroupList.get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
    return objects.GroupList.get_all_by_project(
        context, context.project_id, filters=filters, marker=marker,
        limit=limit, offset=offset, sort_keys=sort_keys,
        sort_dirs=sort_dirs)
def reset_status(self, context, group, status):
    """Reset status of generic group"""
    context.authorize(gp_action_policy.RESET_STATUS, target_obj=group)
    group.update({'status': status,
                  'updated_at': timeutils.utcnow()})
    group.save()
def create_group_snapshot(self, context, group, name, description):
    """Create a snapshot of every volume in *group*.

    Creates the GroupSnapshot object and the per-volume snapshot DB
    records, then asks the volume manager via RPC to create the
    snapshots on the backend.

    :returns: the new GroupSnapshot object (status 'creating')
    """
    context.authorize(gsnap_policy.CREATE_POLICY, target_obj=group)
    group.assert_not_frozen()
    options = {'group_id': group.id,
               'user_id': context.user_id,
               'project_id': context.project_id,
               'status': "creating",
               'name': name,
               'description': description,
               'group_type_id': group.group_type_id}
    group_snapshot = None
    group_snapshot_id = None
    try:
        group_snapshot = objects.GroupSnapshot(context, **options)
        group_snapshot.create()
        group_snapshot_id = group_snapshot.id
        snap_name = group_snapshot.name
        snap_desc = group_snapshot.description
        with group.obj_as_admin():
            self.volume_api.create_snapshots_in_db(
                context, group.volumes, snap_name, snap_desc,
                None, group_snapshot_id)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                # Destroy the group_snapshot only if it was created.
                # BUG FIX: guard against group_snapshot still being
                # None — if the constructor itself raised, calling
                # obj_attr_is_set() on None would mask the original
                # error with an AttributeError.
                if (group_snapshot is not None and
                        group_snapshot.obj_attr_is_set('id')):
                    group_snapshot.destroy()
            finally:
                LOG.error("Error occurred when creating group_snapshot"
                          " %s.", group_snapshot_id)
    self.volume_rpcapi.create_group_snapshot(context, group_snapshot)
    return group_snapshot
def delete_group_snapshot(self, context, group_snapshot, force=False):
    """Delete a group snapshot if it is in a deletable state."""
    context.authorize(gsnap_policy.DELETE_POLICY,
                      target_obj=group_snapshot)
    group_snapshot.assert_not_frozen()
    # Atomically move to 'deleting' only while the snapshot is
    # available/error and no group is being created from it.
    not_in_use = [~db.group_creating_from_src(
        group_snapshot_id=group_snapshot.id)]
    updated = group_snapshot.conditional_update(
        {'status': 'deleting'},
        {'status': ('available', 'error')},
        not_in_use)
    if not updated:
        msg = _('GroupSnapshot status must be available or error, and no '
                'Group can be currently using it as source for its '
                'creation.')
        raise exception.InvalidGroupSnapshot(reason=msg)
    snapshots = objects.SnapshotList.get_all_for_group_snapshot(
        context, group_snapshot.id)
    # TODO(xyang): Add a new db API to update all snapshots statuses
    # in one db API call.
    for snapshot in snapshots:
        snapshot.status = c_fields.SnapshotStatus.DELETING
        snapshot.save()
    self.volume_rpcapi.delete_group_snapshot(context.elevated(),
                                             group_snapshot)
def update_group_snapshot(self, context, group_snapshot, fields):
    """Apply *fields* to a group snapshot after a policy check."""
    context.authorize(gsnap_policy.UPDATE_POLICY,
                      target_obj=group_snapshot)
    group_snapshot.update(fields)
    group_snapshot.save()
def get_group_snapshot(self, context, group_snapshot_id):
    """Fetch a group snapshot by id after a policy check."""
    snap = objects.GroupSnapshot.get_by_id(context, group_snapshot_id)
    context.authorize(gsnap_policy.GET_POLICY, target_obj=snap)
    return snap
def get_all_group_snapshots(self, context, filters=None, marker=None,
                            limit=None, offset=None, sort_keys=None,
                            sort_dirs=None):
    """List group snapshots visible to the caller."""
    context.authorize(gsnap_policy.GET_ALL_POLICY)
    filters = filters or {}
    if context.is_admin and 'all_tenants' in filters:
        # Need to remove all_tenants to pass the filtering below.
        del filters['all_tenants']
        return objects.GroupSnapshotList.get_all(
            context, filters=filters, marker=marker, limit=limit,
            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
    return objects.GroupSnapshotList.get_all_by_project(
        context.elevated(), context.project_id, filters=filters,
        marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
        sort_dirs=sort_dirs)
def reset_group_snapshot_status(self, context, gsnapshot, status):
    """Reset status of group snapshot"""
    context.authorize(gsnap_action_policy.RESET_STATUS,
                      target_obj=gsnapshot)
    gsnapshot.update({'status': status,
                      'updated_at': timeutils.utcnow()})
    gsnapshot.save()
def _check_type(self, group):
    """Ensure *group* and all of its volume types support replication.

    :raises InvalidGroupType: if the group type is not replicated
    :raises InvalidVolumeType: if any member volume type lacks the
        replication spec
    """
    if not group.is_replicated:
        msg = _("Group %s is not a replication group type.") % group.id
        LOG.error(msg)
        raise exception.InvalidGroupType(reason=msg)
    for vol_type in group.volume_types:
        if volume_utils.is_replicated_spec(vol_type.extra_specs):
            continue
        msg = _("Volume type %s does not have 'replication_enabled' "
                "spec key set to ' True'.") % vol_type.id
        LOG.error(msg)
        raise exception.InvalidVolumeType(reason=msg)
# Replication group API (Tiramisu)
def enable_replication(self, context, group):
    """Enable replication for a replication-capable group.

    Validates the group's status and replication status, and that of
    every member volume, then flips them all to ENABLING and asks the
    volume manager (via RPC) to enable replication on the backend.

    :raises InvalidGroup: if the group's status or replication_status
        does not allow enabling replication
    :raises InvalidVolume: if any member volume's status or
        replication_status does not allow enabling replication
    """
    context.authorize(gp_action_policy.ENABLE_REP, target_obj=group)
    # The group (and every one of its volume types) must be a
    # replication-enabled type.
    self._check_type(group)
    valid_status = [c_fields.GroupStatus.AVAILABLE]
    if group.status not in valid_status:
        params = {'valid': valid_status,
                  'current': group.status,
                  'id': group.id}
        msg = _("Group %(id)s status must be %(valid)s, "
                "but current status is: %(current)s. "
                "Cannot enable replication.") % params
        LOG.error(msg)
        raise exception.InvalidGroup(reason=msg)
    # DISABLED or ENABLED are acceptable starting points (re-enabling
    # an already-enabled group is allowed).
    valid_rep_status = [c_fields.ReplicationStatus.DISABLED,
                        c_fields.ReplicationStatus.ENABLED]
    if group.replication_status not in valid_rep_status:
        params = {'valid': valid_rep_status,
                  'current': group.replication_status,
                  'id': group.id}
        msg = _("Group %(id)s replication status must be %(valid)s, "
                "but current status is: %(current)s. "
                "Cannot enable replication.") % params
        LOG.error(msg)
        raise exception.InvalidGroup(reason=msg)
    volumes = objects.VolumeList.get_all_by_generic_group(
        context.elevated(), group.id)
    valid_status = ['available', 'in-use']
    for vol in volumes:
        if vol.status not in valid_status:
            params = {'valid': valid_status,
                      'current': vol.status,
                      'id': vol.id}
            msg = _("Volume %(id)s status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot enable replication.") % params
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        # replication_status could be set to enabled when volume is
        # created and the mirror is built.
        if vol.replication_status not in valid_rep_status:
            params = {'valid': valid_rep_status,
                      'current': vol.replication_status,
                      'id': vol.id}
            msg = _("Volume %(id)s replication status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot enable replication.") % params
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        vol.replication_status = c_fields.ReplicationStatus.ENABLING
        vol.save()
    group.replication_status = c_fields.ReplicationStatus.ENABLING
    group.save()
    # The volume manager flips ENABLING to the terminal state.
    self.volume_rpcapi.enable_replication(context, group)
def disable_replication(self, context, group):
    """Disable replication for a replication-capable group.

    Validates the group and all member volumes, flips their
    replication_status to DISABLING and asks the volume manager via RPC
    to disable replication on the backend.

    :raises InvalidGroup: if the group's status or replication_status
        does not allow disabling replication
    :raises InvalidVolume: if any member volume's replication_status
        does not allow disabling replication
    """
    context.authorize(gp_action_policy.DISABLE_REP, target_obj=group)
    self._check_type(group)
    # Unlike enable, a group in ERROR may also be disabled.
    valid_status = [c_fields.GroupStatus.AVAILABLE,
                    c_fields.GroupStatus.ERROR]
    if group.status not in valid_status:
        params = {'valid': valid_status,
                  'current': group.status,
                  'id': group.id}
        msg = _("Group %(id)s status must be %(valid)s, "
                "but current status is: %(current)s. "
                "Cannot disable replication.") % params
        LOG.error(msg)
        raise exception.InvalidGroup(reason=msg)
    valid_rep_status = [c_fields.ReplicationStatus.ENABLED,
                        c_fields.ReplicationStatus.ERROR]
    if group.replication_status not in valid_rep_status:
        params = {'valid': valid_rep_status,
                  'current': group.replication_status,
                  'id': group.id}
        msg = _("Group %(id)s replication status must be %(valid)s, "
                "but current status is: %(current)s. "
                "Cannot disable replication.") % params
        LOG.error(msg)
        raise exception.InvalidGroup(reason=msg)
    volumes = objects.VolumeList.get_all_by_generic_group(
        context.elevated(), group.id)
    for vol in volumes:
        if vol.replication_status not in valid_rep_status:
            params = {'valid': valid_rep_status,
                      'current': vol.replication_status,
                      'id': vol.id}
            msg = _("Volume %(id)s replication status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot disable replication.") % params
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        vol.replication_status = c_fields.ReplicationStatus.DISABLING
        vol.save()
    group.replication_status = c_fields.ReplicationStatus.DISABLING
    group.save()
    # The volume manager flips DISABLING to the terminal state.
    self.volume_rpcapi.disable_replication(context, group)
def failover_replication(self, context, group,
                         allow_attached_volume=False,
                         secondary_backend_id=None):
    """Fail a replicated group over to a secondary backend.

    Validates the group and all member volumes, flips their
    replication_status to FAILING_OVER and asks the volume manager via
    RPC to perform the failover.

    :param allow_attached_volume: when False, an attached ('in-use')
        volume in the group aborts the failover
    :param secondary_backend_id: target backend id; passed through to
        the volume manager
    :raises InvalidGroup: if the group's status or replication_status
        does not allow failover
    :raises InvalidVolume: if any member volume's state does not allow
        failover
    """
    context.authorize(gp_action_policy.FAILOVER_REP, target_obj=group)
    self._check_type(group)
    valid_status = [c_fields.GroupStatus.AVAILABLE]
    if group.status not in valid_status:
        params = {'valid': valid_status,
                  'current': group.status,
                  'id': group.id}
        msg = _("Group %(id)s status must be %(valid)s, "
                "but current status is: %(current)s. "
                "Cannot failover replication.") % params
        LOG.error(msg)
        raise exception.InvalidGroup(reason=msg)
    # FAILED_OVER is accepted so a group can be failed back.
    valid_rep_status = [c_fields.ReplicationStatus.ENABLED,
                        c_fields.ReplicationStatus.FAILED_OVER]
    if group.replication_status not in valid_rep_status:
        params = {'valid': valid_rep_status,
                  'current': group.replication_status,
                  'id': group.id}
        msg = _("Group %(id)s replication status must be %(valid)s, "
                "but current status is: %(current)s. "
                "Cannot failover replication.") % params
        LOG.error(msg)
        raise exception.InvalidGroup(reason=msg)
    volumes = objects.VolumeList.get_all_by_generic_group(
        context.elevated(), group.id)
    valid_status = ['available', 'in-use']
    for vol in volumes:
        if vol.status not in valid_status:
            params = {'valid': valid_status,
                      'current': vol.status,
                      'id': vol.id}
            msg = _("Volume %(id)s status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot failover replication.") % params
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        # Attached volumes are only failed over when the caller
        # explicitly opted in.
        if vol.status == 'in-use' and not allow_attached_volume:
            msg = _("Volume %s is attached but allow_attached_volume flag "
                    "is False. Cannot failover replication.") % vol.id
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        if vol.replication_status not in valid_rep_status:
            params = {'valid': valid_rep_status,
                      'current': vol.replication_status,
                      'id': vol.id}
            msg = _("Volume %(id)s replication status must be %(valid)s, "
                    "but current status is: %(current)s. "
                    "Cannot failover replication.") % params
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)
        vol.replication_status = c_fields.ReplicationStatus.FAILING_OVER
        vol.save()
    group.replication_status = c_fields.ReplicationStatus.FAILING_OVER
    group.save()
    # The volume manager flips FAILING_OVER to the terminal state.
    self.volume_rpcapi.failover_replication(context, group,
                                            allow_attached_volume,
                                            secondary_backend_id)
def list_replication_targets(self, context, group):
    """Return the replication targets for a replication-enabled group."""
    context.authorize(gp_action_policy.LIST_REP, target_obj=group)
    self._check_type(group)
    return self.volume_rpcapi.list_replication_targets(context, group)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/i18n.py 0000664 0000000 0000000 00000002123 15131732575 0020763 0 ustar 00root root 0000000 0000000 # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See https://docs.openstack.org/oslo.i18n/latest/user/index.html .
"""
import oslo_i18n as i18n
DOMAIN = 'cinder'
_translators = i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
def enable_lazy(enable=True):
    """Enable or disable lazy message translation (delegates to oslo.i18n)."""
    return i18n.enable_lazy(enable)
def translate(value, user_locale=None):
    """Translate *value* to *user_locale* (delegates to oslo.i18n)."""
    return i18n.translate(value, user_locale)
def get_available_languages():
    """Return the languages with translations for the 'cinder' domain."""
    return i18n.get_available_languages(DOMAIN)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/ 0000775 0000000 0000000 00000000000 15131732575 0020716 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0023015 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/accelerator.py 0000664 0000000 0000000 00000005641 15131732575 0023562 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
CONF = cfg.CONF
# NOTE(ZhengMa): The order of the options is important; accelerators
# are looked up in this list order.
# Be careful when editing it.
_ACCEL_PATH_PREFERENCE_ORDER_LIST = [
'cinder.image.accelerators.qat.AccelQAT',
'cinder.image.accelerators.gzip.AccelGZIP',
]
class AccelBase(object, metaclass=abc.ABCMeta):
    """Abstract interface for image (de)compression accelerator engines."""

    def __init__(self):
        return

    @abc.abstractmethod
    def is_accel_exist(self):
        """Return whether the accelerator tool is installed on this host."""

    @abc.abstractmethod
    def compress_img(self, src, dest, run_as_root):
        """Compress the file at *src*, leaving the result at *dest*."""

    @abc.abstractmethod
    def decompress_img(self, src, dest, run_as_root):
        """Decompress the file at *src*, leaving the result at *dest*."""
class ImageAccel(object):
    """Selects and drives a compression accelerator for an image transfer.

    *src* and *dest* are file paths; the chosen engine compresses or
    decompresses between them.  Only the 'gzip' compression format has
    accelerator support (see _ACCEL_PATH_PREFERENCE_ORDER_LIST).
    """

    def __init__(self, src, dest):
        self.src = src
        self.dest = dest
        self.compression_format = CONF.compression_format
        if self.compression_format == 'gzip':
            self._accel_engine_path = _ACCEL_PATH_PREFERENCE_ORDER_LIST
        else:
            # No accelerator candidates exist for other formats.
            self._accel_engine_path = None
        self.engine = self._get_engine()

    def _get_engine(self, *args, **kwargs):
        # Probe candidate engines in preference order; the first one
        # whose tool is installed on this host wins.
        if self._accel_engine_path:
            for accel in self._accel_engine_path:
                engine_cls = importutils.import_class(accel)
                eng = engine_cls(*args, **kwargs)
                if eng.is_accel_exist():
                    return eng
            ex_msg = _("No valid accelerator")
            raise exception.CinderException(ex_msg)
        # No candidate list configured: fall through and return None;
        # is_engine_ready() reports False in that case.

    def is_engine_ready(self):
        """Return True when an accelerator engine is selected and usable."""
        if not self.engine:
            return False
        if not self.engine.is_accel_exist():
            return False
        return True

    def compress_img(self, run_as_root):
        """Compress src into dest; silently no-op when no engine is ready."""
        if not self.is_engine_ready():
            return
        self.engine.compress_img(self.src,
                                 self.dest,
                                 run_as_root)

    def decompress_img(self, run_as_root):
        """Decompress src into dest; silently no-op when no engine is ready."""
        if not self.is_engine_ready():
            return
        self.engine.decompress_img(self.src,
                                   self.dest,
                                   run_as_root)
def is_gzip_compressed(image_file):
    """Return True if *image_file* starts with the gzip magic bytes.

    Gzip streams begin with the two-byte signature 1f 8b (RFC 1952).
    """
    with open(image_file, 'rb') as stream:
        header = stream.read(2)
    return header == b'\x1f\x8b'
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/accelerators/ 0000775 0000000 0000000 00000000000 15131732575 0023365 5 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/accelerators/__init__.py 0000664 0000000 0000000 00000000000 15131732575 0025464 0 ustar 00root root 0000000 0000000 cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/accelerators/gzip.py 0000664 0000000 0000000 00000010045 15131732575 0024710 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.image import accelerator
from cinder import utils
LOG = logging.getLogger(__name__)
class AccelGZIP(accelerator.AccelBase):
    """Software (de)compression engine backed by the ``gzip`` CLI tool."""

    def is_accel_exist(self):
        """Return True when the gzip executable can be located."""
        try:
            utils.execute('which', 'gzip')
        except processutils.ProcessExecutionError:
            LOG.error("GZIP package is not installed.")
            return False
        return True

    def compress_img(self, src, dest, run_as_root):
        """Compress *src* and leave the compressed data at *dest*.

        gzip writes its output to ``<src>.gz`` (``-k`` keeps the
        original file), so the result is renamed afterwards to the
        path Cinder expects to upload.
        """
        compress_cmd = ['gzip', '-k', src]
        try:
            utils.execute(*compress_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            raise exception.CinderAcceleratorError(
                accelerator='GZIP',
                description=_("Volume compression failed while "
                              "uploading to glance. GZIP compression "
                              "command failed."),
                cmd=compress_cmd,
                reason=ex.stderr)
        compressed_path = src + '.gz'
        rename_cmd = ['mv', compressed_path, dest]
        try:
            utils.execute(*rename_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            fnames = {'i_fname': compressed_path, 'o_fname': dest}
            raise exception.CinderAcceleratorError(
                accelerator='GZIP',
                description=_("Failed to rename %(i_fname)s "
                              "to %(o_fname)s") % fnames,
                cmd=rename_cmd,
                reason=ex.stderr)

    def decompress_img(self, src, dest, run_as_root):
        """Decompress *src* and leave the image data at *dest*.

        gzip only accepts input files carrying a ``.gz`` suffix, so
        *src* is first renamed to ``<dest>.gz``; decompressing that
        file then produces *dest*.
        """
        gz_path = dest + '.gz'
        rename_cmd = ['mv', src, gz_path]
        try:
            utils.execute(*rename_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            fnames = {'i_fname': src, 'o_fname': gz_path}
            raise exception.CinderAcceleratorError(
                accelerator='GZIP',
                description=_("Failed to rename %(i_fname)s "
                              "to %(o_fname)s") % fnames,
                cmd=rename_cmd,
                reason=ex.stderr)
        decompress_cmd = ['gzip', '-d', gz_path]
        try:
            utils.execute(*decompress_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            raise exception.CinderAcceleratorError(
                accelerator='GZIP',
                description=_("Image decompression failed while "
                              "downloading from glance. GZIP "
                              "decompression command failed."),
                cmd=decompress_cmd,
                reason=ex.stderr)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/accelerators/qat.py 0000664 0000000 0000000 00000010041 15131732575 0024520 0 ustar 00root root 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.image import accelerator
from cinder import utils
LOG = logging.getLogger(__name__)
class AccelQAT(accelerator.AccelBase):
    """Hardware (de)compression engine backed by Intel QATzip (``qzip``)."""

    def is_accel_exist(self):
        """Return True when the qzip executable can be located."""
        cmd = ['which', 'qzip']
        try:
            utils.execute(*cmd)
        except processutils.ProcessExecutionError:
            LOG.error("QATzip package is not installed.")
            return False
        return True

    # NOTE(ZhengMa): QATzip compresses a file in-place and adds a .gz
    # extension to the filename, so we rename the compressed file back
    # to the name Cinder expects it to have.
    # (Cinder expects to have A to upload)
    # Follow these steps:
    # 1. compress A to A.gz (src to qat_out_file)
    # 2. mv A.gz to A (qat_out_file to dest)
    def compress_img(self, src, dest, run_as_root):
        """Compress *src* with qzip and leave the result at *dest*."""
        try:
            # NOTE(review): '-o dest' asks qzip for an explicit output
            # name, yet the rename below assumes the output landed at
            # '<src>.gz' — one of the two looks redundant; confirm
            # against qzip's actual output-naming behavior.
            qat_compress_cmd = ['qzip', '-k', src, '-o', dest]
            utils.execute(*qat_compress_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            raise exception.CinderAcceleratorError(
                accelerator='QAT',
                description=_("Volume compression failed while "
                              "uploading to glance. QAT compression "
                              "command failed."),
                cmd=qat_compress_cmd,
                reason=ex.stderr)
        try:
            qat_output_filename = src + '.gz'
            mv_cmd = ['mv', qat_output_filename, dest]
            utils.execute(*mv_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            fnames = {'i_fname': qat_output_filename, 'o_fname': dest}
            raise exception.CinderAcceleratorError(
                accelerator='QAT',
                description = _("Failed to rename %(i_fname)s "
                                "to %(o_fname)s") % fnames,
                cmd=mv_cmd,
                reason=ex.stderr)

    # NOTE(ZhengMa): QATzip can only decompresses a file with a .gz
    # extension to the filename, so we rename the original file so
    # that it can be accepted by QATzip.
    # Follow these steps:
    # 1. mv A to A.gz (qat_in_file is A.gz)
    # 2. decompress A.gz to A (qat_in_file to dest)
    def decompress_img(self, src, dest, run_as_root):
        """Decompress *src* with qzip and leave the result at *dest*."""
        try:
            qat_input_filename = dest + '.gz'
            mv_cmd = ['mv', src, qat_input_filename]
            utils.execute(*mv_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            fnames = {'i_fname': src, 'o_fname': qat_input_filename}
            raise exception.CinderAcceleratorError(
                accelerator='QAT',
                description = _("Failed to rename %(i_fname)s "
                                "to %(o_fname)s") % fnames,
                cmd=mv_cmd,
                reason=ex.stderr)
        try:
            qat_decompress_cmd = ['qzip', '-d', qat_input_filename]
            utils.execute(*qat_decompress_cmd, run_as_root=run_as_root)
        except processutils.ProcessExecutionError as ex:
            raise exception.CinderAcceleratorError(
                accelerator='QAT',
                description = _("Image decompression failed while "
                                "downloading from glance. QAT "
                                "decompression command failed."),
                cmd=qat_decompress_cmd,
                reason=ex.stderr)
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/cache.py 0000664 0000000 0000000 00000026576 15131732575 0022353 0 ustar 00root root 0000000 0000000 # Copyright (C) 2015 Pure Storage, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from typing import Optional
from zoneinfo import ZoneInfo
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from cinder import context
from cinder import objects
from cinder import rpc
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ImageVolumeCache(object):
def __init__(self,
             db,
             volume_api,
             max_cache_size_gb: int = 0,
             max_cache_size_count: int = 0,
             clone_across_pools: bool = False):
    """Initialize the image-volume cache.

    :param db: database API
    :param volume_api: volume API used to delete cached volumes
    :param max_cache_size_gb: cache size limit in GB (0 = unlimited)
    :param max_cache_size_count: max number of entries (0 = unlimited)
    :param clone_across_pools: when True, cache lookups are not limited
        to the volume's own host (see _get_query_filters)
    """
    self.db = db
    self.volume_api = volume_api
    self.max_cache_size_gb = int(max_cache_size_gb)
    self.max_cache_size_count = int(max_cache_size_count)
    self.clone_across_pools = bool(clone_across_pools)
    # Notifier used for cache hit/miss/eviction notifications.
    self.notifier = rpc.get_notifier('volume', CONF.host)
def get_by_image_volume(self,
                        context: context.RequestContext,
                        volume_id: str):
    """Return the cache entry backed by volume *volume_id*, if any."""
    return self.db.image_volume_cache_get_by_volume_id(context, volume_id)
def evict(self,
          context: context.RequestContext,
          cache_entry: dict) -> None:
    """Remove *cache_entry* from the DB and notify about the eviction."""
    LOG.debug('Evicting image cache entry: %(entry)s.',
              {'entry': self._entry_to_str(cache_entry)})
    volume_id = cache_entry['volume_id']
    self.db.image_volume_cache_delete(context, volume_id)
    self._notify_cache_eviction(context, cache_entry['image_id'],
                                cache_entry['host'])
def _get_query_filters(self, volume_ref: objects.Volume) -> dict:
    """Return DB filters scoping cache lookups for *volume_ref*."""
    # Clustered (A/A HA) deployments share the cache per cluster.
    if volume_ref.is_clustered:
        return {'cluster_name': volume_ref.cluster_name}
    if self.clone_across_pools:
        # FIXME(whoami-rajat): If we have two cinder backends pointing to
        # two different storage arrays, this logic will allow the operation
        # to proceed to clone across two storage arrays which will fail
        # eventually. We should at least filter with the hostname in the
        # given host value hostname@backend#pool.
        return {}
    return {'host': volume_ref.host}
def get_entry(self,
              context: context.RequestContext,
              volume_ref: objects.Volume,
              image_id: str,
              image_meta: dict) -> Optional[dict]:
    """Look up a cache entry for *image_id* usable by *volume_ref*.

    Bumps the entry's last-used timestamp when found.  A stale entry
    (image updated after it was cached) is deleted and treated as a
    miss.  Emits a cache hit or miss notification either way.

    :returns: the cache entry dict, or None on a miss
    """
    cache_entry = self.db.image_volume_cache_get_and_update_last_used(
        context,
        image_id,
        **self._get_query_filters(volume_ref)
    )
    if cache_entry:
        LOG.debug('Found image-volume cache entry: %(entry)s.',
                  {'entry': self._entry_to_str(cache_entry)})
        # Entry is stale if the image changed after it was cached.
        if self._should_update_entry(cache_entry, image_meta):
            msg = 'Deleting image-volume cache entry that is out-dated'
            self.delete_cached_volume(context, cache_entry, msg)
            cache_entry = None
    if cache_entry:
        self._notify_cache_hit(context, cache_entry['image_id'],
                               cache_entry['host'])
    else:
        self._notify_cache_miss(context, image_id,
                                volume_ref['host'])
    return cache_entry
def create_cache_entry(self,
                       context: context.RequestContext,
                       volume_ref: objects.Volume,
                       image_id: str,
                       image_meta: dict) -> dict:
    """Create a new cache entry for an image.

    This assumes that the volume described by volume_ref has already been
    created and is in an available state.

    :param context: request context
    :param volume_ref: the already-created cache volume
    :param image_id: Glance image id being cached
    :param image_meta: image metadata; its 'updated_at' is recorded so
        staleness can be detected later
    :returns: the new cache entry dict
    """
    LOG.debug('Creating new image-volume cache entry for image '
              '%(image_id)s on %(service)s',
              {'image_id': image_id,
               'service': volume_ref.service_topic_queue})
    # When we are creating an image from a volume the updated_at field
    # will be a unicode representation of the datetime. In that case
    # we just need to parse it into one. If it is an actual datetime
    # we want to just grab it as a UTC naive datetime.
    image_updated_at = image_meta['updated_at']
    if isinstance(image_updated_at, str):
        image_updated_at = timeutils.parse_strtime(image_updated_at)
    else:
        image_updated_at = image_updated_at.astimezone(ZoneInfo('UTC'))
    cache_entry = self.db.image_volume_cache_create(
        context,
        volume_ref.host,
        volume_ref.cluster_name,
        image_id,
        # Stored as a naive UTC timestamp.
        image_updated_at.replace(tzinfo=None),
        volume_ref.id,
        volume_ref.size
    )
    LOG.debug('New image-volume cache entry created: %(entry)s.',
              {'entry': self._entry_to_str(cache_entry)})
    return cache_entry
def delete_cached_volume(self,
                         context: context.RequestContext,
                         cache_entry: dict,
                         msg: str) -> None:
    """Delete a volume and remove cache entry."""
    LOG.debug('%(msg)s: entry %(entry)s.',
              {'msg': msg, 'entry': self._entry_to_str(cache_entry)})
    cached_volume = objects.Volume.get_by_id(context,
                                             cache_entry['volume_id'])
    # Deleting the volume evicts the cache entry as a side effect.
    self.volume_api.delete(context, cached_volume)
def ensure_space(self,
context: context.RequestContext,
volume: objects.Volume) -> bool:
"""Makes room for a volume cache entry.
Returns True if successful, false otherwise.
"""
# Check to see if the cache is actually limited.
if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0:
return True
# Make sure that we can potentially fit the image in the cache
# and bail out before evicting everything else to try and make
# room for it.
if (self.max_cache_size_gb != 0 and
volume.size > self.max_cache_size_gb):
return False
# Assume the entries are ordered by most recently used to least used.
entries = self.db.image_volume_cache_get_all(
context,
**self._get_query_filters(volume))
current_count = len(entries)
current_size = 0
for entry in entries:
current_size += entry['size']
# Add values for the entry we intend to create.
current_size += volume.size
current_count += 1
LOG.debug('Image-volume cache for %(service)s current_size (GB) = '
'%(size_gb)s (max = %(max_gb)s), current count = %(count)s '
'(max = %(max_count)s).',
{'service': volume.service_topic_queue,
'size_gb': current_size,
'max_gb': self.max_cache_size_gb,
'count': current_count,
'max_count': self.max_cache_size_count})
while (((current_size > self.max_cache_size_gb and
self.max_cache_size_gb > 0)
or (current_count > self.max_cache_size_count and
self.max_cache_size_count > 0))
and len(entries)):
entry = entries.pop()
msg = 'Deleting image-volume cache entry to reclaim space'
self.delete_cached_volume(context, entry, msg)
current_size -= entry['size']
current_count -= 1
LOG.debug('Image-volume cache for %(service)s new size (GB) = '
'%(size_gb)s, new count = %(count)s.',
{'service': volume.service_topic_queue,
'size_gb': current_size,
'count': current_count})
# It is only possible to not free up enough gb, we will always be able
# to free enough count. This is because 0 means unlimited which means
# it is guaranteed to be >0 if limited, and we can always delete down
# to 0.
if self.max_cache_size_gb > 0:
if current_size > self.max_cache_size_gb > 0:
LOG.warning('Image-volume cache for %(service)s does '
'not have enough space (GB).',
{'service': volume.service_topic_queue})
return False
return True
    @utils.if_notifications_enabled
    def _notify_cache_hit(self,
                          context: context.RequestContext,
                          image_id: str,
                          host: str) -> None:
        """Emit an 'image_volume_cache.hit' notification for image_id.

        No-op when notifications are disabled, per the decorator.
        """
        self._notify_cache_action(context, image_id, host, 'hit')
    @utils.if_notifications_enabled
    def _notify_cache_miss(self,
                           context: context.RequestContext,
                           image_id: str,
                           host: str) -> None:
        """Emit an 'image_volume_cache.miss' notification for image_id.

        No-op when notifications are disabled, per the decorator.
        """
        self._notify_cache_action(context, image_id, host, 'miss')
    @utils.if_notifications_enabled
    def _notify_cache_eviction(self,
                               context: context.RequestContext,
                               image_id: str,
                               host: str) -> None:
        """Emit an 'image_volume_cache.evict' notification for image_id.

        No-op when notifications are disabled, per the decorator.
        """
        self._notify_cache_action(context, image_id, host, 'evict')
@utils.if_notifications_enabled
def _notify_cache_action(self,
context: context.RequestContext,
image_id: str,
host: str,
action: str) -> None:
data = {
'image_id': image_id,
'host': host,
}
LOG.debug('ImageVolumeCache notification: action=%(action)s'
' data=%(data)s.', {'action': action, 'data': data})
self.notifier.info(context, 'image_volume_cache.%s' % action, data)
def _should_update_entry(self,
cache_entry: dict,
image_meta: dict) -> bool:
"""Ensure that the cache entry image data is still valid."""
image_updated_utc = (image_meta['updated_at']
.astimezone(ZoneInfo('UTC')))
cache_updated_utc = (cache_entry['image_updated_at']
.replace(tzinfo=ZoneInfo('UTC')))
LOG.debug('Image-volume cache entry image_update_at = %(entry_utc)s, '
'requested image updated_at = %(image_utc)s.',
{'entry_utc': str(cache_updated_utc),
'image_utc': str(image_updated_utc)})
return image_updated_utc != cache_updated_utc
def _entry_to_str(self, cache_entry: dict) -> str:
return str({
'id': cache_entry['id'],
'image_id': cache_entry['image_id'],
'volume_id': cache_entry['volume_id'],
'host': cache_entry['host'],
'size': cache_entry['size'],
'image_updated_at': cache_entry['image_updated_at'],
'last_used': cache_entry['last_used'],
})
cinder-27.0.0+git20260115.159.4fef6d9d4/cinder/image/format_inspector.py 0000664 0000000 0000000 00000104023 15131732575 0024646 0 ustar 00root root 0000000 0000000 # Copyright 2020 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This is a python implementation of virtual disk format inspection routines
gathered from various public specification documents, as well as qemu disk
driver code. It attempts to store and parse the minimum amount of data
required, and in a streaming-friendly manner to collect metadata about
complex-format images.
"""
import struct
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def chunked_reader(fileobj, chunk_size=512):
    """Yield successive chunk_size reads from fileobj until EOF."""
    while chunk := fileobj.read(chunk_size):
        yield chunk
class CaptureRegion(object):
    """Represents a region of a file we want to capture.

    A region of a file we want to capture requires a byte offset into
    the file and a length. This is expected to be used by a data
    processing loop, calling capture() with the most recently-read
    chunk. This class handles the task of grabbing the desired region
    of data across potentially multiple fractional and unaligned reads.

    :param offset: Byte offset into the file starting the region
    :param length: The length of the region
    """
    def __init__(self, offset, length):
        self.offset = offset
        self.length = length
        self.data = b''

    @property
    def complete(self):
        """Returns True when we have captured the desired data."""
        return len(self.data) == self.length

    def capture(self, chunk, current_position):
        """Process a chunk of data.

        This should be called for each chunk in the read loop, at least
        until complete returns True.

        :param chunk: A chunk of bytes in the file
        :param current_position: The position of the file processed by the
                                 read loop so far. Note that this will be
                                 the position in the file *after* the chunk
                                 being presented.
        """
        read_start = current_position - len(chunk)

        # Does this chunk overlap our region at all? Either our offset
        # falls within the read window, or the read started inside the
        # region's span.
        overlaps = (read_start <= self.offset <= current_position or
                    self.offset <= read_start <= self.offset + self.length)
        if not overlaps:
            return

        # Skip any bytes of the chunk that precede our offset, then clamp
        # accumulated data to the region length.
        lead_gap = max(0, self.offset - read_start)
        self.data = (self.data + chunk[lead_gap:])[:self.length]
class ImageFormatError(Exception):
    """An unrecoverable image format error that aborts the process."""
class TraceDisabled(object):
    """A no-op stand-in for a logger, used when tracing is not wanted."""
    def debug(self, *a, **k):
        """Discard the message and all arguments."""

    # Every severity is equally ignored.
    info = debug
    warning = debug
    error = debug
class FileInspector(object):
    """A stream-based disk image inspector.

    This base class works on raw images and is subclassed for more
    complex types. It is to be presented with the file to be examined
    one chunk at a time, during read processing and will only store
    as much data as necessary to determine required attributes of
    the file.
    """

    def __init__(self, tracing=False):
        # Bytes of the stream consumed so far.
        self._total_count = 0

        # NOTE(danms): The logging in here is extremely verbose for a reason,
        # but should never really be enabled at that level at runtime. To
        # retain all that work and assist in future debug, we have a separate
        # debug flag that can be passed from a manual tool to turn it on.
        if tracing:
            self._log = logging.getLogger(str(self))
        else:
            self._log = TraceDisabled()

        # name -> CaptureRegion for every span of the file we care about.
        self._capture_regions = {}

    def _capture(self, chunk, only=None):
        """Offer chunk to each incomplete region (optionally a subset)."""
        for name, region in self._capture_regions.items():
            wanted = not only or name in only
            if wanted and not region.complete:
                region.capture(chunk, self._total_count)

    def eat_chunk(self, chunk):
        """Call this to present chunks of the file to the inspector."""
        regions_before = set(self._capture_regions)

        # Advance the position-in-file counter first, so regions see the
        # position *after* this chunk.
        self._total_count += len(chunk)

        # Offer the chunk to every region we already know about.
        self._capture(chunk)

        # Let the format do some post-read processing of the stream,
        # which may define new regions.
        self.post_process()

        # Regions added by post_process() may also begin within this
        # chunk, so offer it to just those.
        added = set(self._capture_regions) - regions_before
        if added:
            self._capture(chunk, only=added)

    def post_process(self):
        """Post-read hook to process what has been read so far.

        This will be called after each chunk is read and potentially captured
        by the defined regions. If any regions are defined by this call,
        those regions will be presented with the current chunk in case it
        is within one of the new regions.
        """

    def region(self, name):
        """Get a CaptureRegion by name."""
        return self._capture_regions[name]

    def new_region(self, name, region):
        """Add a new CaptureRegion by name."""
        if self.has_region(name):
            # This is a bug: the same region must never be added twice.
            raise ImageFormatError('Inspector re-added region %s' % name)
        self._capture_regions[name] = region

    def has_region(self, name):
        """Returns True if named region has been defined."""
        return name in self._capture_regions

    @property
    def format_match(self):
        """Returns True if the file appears to be the expected format."""
        # Raw has no magic bytes; anything matches.
        return True

    @property
    def virtual_size(self):
        """Returns the virtual size of the disk image, or zero if unknown."""
        # For raw, the virtual size is simply how much we have consumed.
        return self._total_count

    @property
    def actual_size(self):
        """Returns the total size of the file.

        This is usually smaller than virtual_size. NOTE: this will only be
        accurate if the entire file is read and processed.
        """
        return self._total_count

    @property
    def complete(self):
        """Returns True if we have all the information needed."""
        return all(region.complete
                   for region in self._capture_regions.values())

    def __str__(self):
        """The string name of this file format."""
        return 'raw'

    @property
    def context_info(self):
        """Return info on amount of data held in memory for auditing.

        This is a dict of region:sizeinbytes items that the inspector
        uses to examine the file.
        """
        return {name: len(region.data)
                for name, region in self._capture_regions.items()}

    @classmethod
    def from_file(cls, filename):
        """Read as much of a file as necessary to complete inspection.

        NOTE: Because we only read as much of the file as necessary, the
        actual_size property will not reflect the size of the file, but the
        amount of data we read before we satisfied the inspector.

        Raises ImageFormatError if we cannot parse the file.
        """
        inspector = cls()
        with open(filename, 'rb') as f:
            for chunk in chunked_reader(f):
                inspector.eat_chunk(chunk)
                if inspector.complete:
                    # No need to eat any more data
                    break

        if not inspector.complete or not inspector.format_match:
            raise ImageFormatError('File is not in requested format')

        return inspector

    def safety_check(self):
        """Perform some checks to determine if this file is safe.

        Returns True if safe, False otherwise. It may raise ImageFormatError
        if safety cannot be guaranteed because of parsing or other errors.
        """
        return True
# The qcow2 format consists of a big-endian 72-byte header, of which
# only a small portion has information we care about:
#
# Dec Hex Name
# 0 0x00 Magic 4-bytes 'QFI\xfb'
# 4 0x04 Version (uint32_t, should always be 2 for modern files)
# . . .
# 8 0x08 Backing file offset (uint64_t)
# 24 0x18 Size in bytes (uint64_t)
# . . .
# 72 0x48 Incompatible features bitfield (6 bytes)
#
# https://gitlab.com/qemu-project/qemu/-/blob/master/docs/interop/qcow2.txt
class QcowInspector(FileInspector):
    """QEMU QCOW2 Format

    This should only require about 32 bytes of the beginning of the file
    to determine the virtual size, and 104 bytes to perform the safety check.
    """

    # Offset/length of the backing file offset field (uint64_t).
    BF_OFFSET = 0x08
    BF_OFFSET_LEN = 8
    # Offset/length of the incompatible features bitfield.
    I_FEATURES = 0x48
    I_FEATURES_LEN = 8
    # 1-based position of the external-data-file feature bit.
    I_FEATURES_DATAFILE_BIT = 3
    # Highest feature bit we recognize; anything above it is "unknown".
    I_FEATURES_MAX_BIT = 4

    def __init__(self, *a, **k):
        super(QcowInspector, self).__init__(*a, **k)
        self.new_region('header', CaptureRegion(0, 512))

    def _qcow_header_data(self):
        """Unpack (magic, size) from the first 32 captured header bytes."""
        magic, version, bf_offset, bf_sz, cluster_bits, size = (
            struct.unpack('>4sIQIIQ', self.region('header').data[:32]))
        return magic, size

    @property
    def has_header(self):
        """True once the 512-byte header region is fully captured."""
        return self.region('header').complete

    @property
    def virtual_size(self):
        """Virtual disk size in bytes, or 0 if not (yet) determinable."""
        if not self.region('header').complete:
            return 0
        if not self.format_match:
            return 0
        magic, size = self._qcow_header_data()
        return size

    @property
    def format_match(self):
        """True if the captured header carries the qcow2 magic bytes."""
        if not self.region('header').complete:
            return False
        magic, size = self._qcow_header_data()
        return magic == b'QFI\xFB'

    @property
    def has_backing_file(self):
        """Whether a backing file is referenced (None if undetermined)."""
        if not self.region('header').complete:
            return None
        if not self.format_match:
            return False
        bf_offset_bytes = self.region('header').data[
            self.BF_OFFSET:self.BF_OFFSET + self.BF_OFFSET_LEN]
        # nonzero means "has a backing file"
        bf_offset, = struct.unpack('>Q', bf_offset_bytes)
        return bf_offset != 0

    @property
    def has_unknown_features(self):
        """Whether any feature bit beyond those we know about is set."""
        if not self.region('header').complete:
            return None
        if not self.format_match:
            return False
        i_features = self.region('header').data[
            self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN]

        # This is the maximum byte number we should expect any bits to be set
        max_byte = self.I_FEATURES_MAX_BIT // 8

        # The flag bytes are in big-endian ordering, so if we process
        # them in index-order, they're reversed
        for i, byte_num in enumerate(reversed(range(self.I_FEATURES_LEN))):
            if byte_num == max_byte:
                # If we're in the max-allowed byte, allow any bits less than
                # the maximum-known feature flag bit to be set
                allow_mask = ((1 << self.I_FEATURES_MAX_BIT) - 1)
            elif byte_num > max_byte:
                # If we're above the byte with the maximum known feature flag
                # bit, then we expect all zeroes
                allow_mask = 0x0
            else:
                # Any earlier-than-the-maximum byte can have any of the flag
                # bits set
                allow_mask = 0xFF

            if i_features[i] & ~allow_mask:
                # BUGFIX: log the byte we actually tested (buffer index i);
                # the previous code logged i_features[byte_num], which is
                # the mirrored byte and could report the wrong bit values.
                LOG.warning('Found unknown feature bit in byte %i: %s/%s',
                            byte_num, bin(i_features[i] & ~allow_mask),
                            bin(allow_mask))
                return True

        return False

    @property
    def has_data_file(self):
        """Whether the external-data-file feature bit is set."""
        if not self.region('header').complete:
            return None
        if not self.format_match:
            return False
        i_features = self.region('header').data[
            self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN]

        # First byte of bitfield, which is i_features[7]
        byte = self.I_FEATURES_LEN - 1 - self.I_FEATURES_DATAFILE_BIT // 8
        # Third bit of bitfield, which is 0x04. Parenthesized explicitly:
        # the old expression '- 1 % 8' only yielded the intended shift (2)
        # because 1 % 8 == 1.
        bit = 1 << ((self.I_FEATURES_DATAFILE_BIT - 1) % 8)
        return bool(i_features[byte] & bit)

    def __str__(self):
        return 'qcow2'

    def safety_check(self):
        """Safe only with no backing file, data file, or unknown features."""
        return (not self.has_backing_file and
                not self.has_data_file and
                not self.has_unknown_features)

    def safety_check_allow_backing_file(self):
        """Like safety_check(), but a backing file reference is allowed."""
        return (not self.has_data_file and
                not self.has_unknown_features)
class QEDInspector(FileInspector):
    """QEMU Enhanced Disk (QED) format detector.

    QED is detected only so that it can be unconditionally rejected.
    """
    def __init__(self, tracing=False):
        super().__init__(tracing)
        self.new_region('header', CaptureRegion(0, 512))

    @property
    def format_match(self):
        header = self.region('header')
        return header.complete and header.data.startswith(b'QED\x00')

    def safety_check(self):
        # QED format is not supported by anyone, but we want to detect it
        # and mark it as just always unsafe.
        return False
# The VHD (or VPC as QEMU calls it) format consists of a big-endian
# 512-byte "footer" at the beginning of the file with various
# information, most of which does not matter to us:
#
# Dec Hex Name
# 0 0x00 Magic string (8-bytes, always 'conectix')
# 40 0x28 Disk size (uint64_t)
#
# https://github.com/qemu/qemu/blob/master/block/vpc.c
class VHDInspector(FileInspector):
    """Connectix/MS VPC VHD Format

    This should only require about 512 bytes of the beginning of the file
    to determine the virtual size.
    """
    def __init__(self, *a, **k):
        super(VHDInspector, self).__init__(*a, **k)
        self.new_region('header', CaptureRegion(0, 512))

    @property
    def format_match(self):
        # The footer copy at offset 0 always begins with the magic string.
        return self.region('header').data.startswith(b'conectix')

    @property
    def virtual_size(self):
        header = self.region('header')
        if not header.complete or not self.format_match:
            return 0
        # Disk size is a big-endian uint64 at offset 40 of the footer.
        (size,) = struct.unpack('>Q', header.data[40:48])
        return size

    def __str__(self):
        return 'vhd'
# The VHDX format consists of a complex dynamic little-endian
# structure with multiple regions of metadata and data, linked by
# offsets with in the file (and within regions), identified by MSFT
# GUID strings. The header is a 320KiB structure, only a few pieces of
# which we actually need to capture and interpret:
#
# Dec Hex Name
# 0 0x00000 Identity (Technically 9-bytes, padded to 64KiB, the first
# 8 bytes of which are 'vhdxfile')
# 196608 0x30000 The Region table (64KiB of a 32-byte header, followed
# by up to 2047 36-byte region table entry structures)
#
# The region table header includes two items we need to read and parse,
# which are:
#
# 196608 0x30000 4-byte signature ('regi')
# 196616 0x30008 Entry count (uint32-t)
#
# The region table entries follow the region table header immediately
# and are identified by a 16-byte GUID, and provide an offset of the
# start of that region. We care about the "metadata region", identified
# by the METAREGION class variable. The region table entry is (offsets
# from the beginning of the entry, since it could be in multiple places):
#
# 0 0x00000 16-byte MSFT GUID
# 16 0x00010 Offset of the actual metadata region (uint64_t)
#
# When we find the METAREGION table entry, we need to grab that offset
# and start examining the region structure at that point. That
# consists of a metadata table of structures, which point to places in
# the data in an unstructured space that follows. The header is
# (offsets relative to the region start):
#
# 0 0x00000 8-byte signature ('metadata')
# . . .
# 16 0x00010 2-byte entry count (up to 2047 entries max)
#
# This header is followed by the specified number of metadata entry
# structures, identified by GUID:
#
# 0 0x00000 16-byte MSFT GUID
# 16 0x00010 4-byte offset (uint32_t, relative to the beginning of
# the metadata region)
#
# We need to find the "Virtual Disk Size" metadata item, identified by
# the GUID in the VIRTUAL_DISK_SIZE class variable, grab the offset,
# add it to the offset of the metadata region, and examine that 8-byte
# chunk of data that follows.
#
# The "Virtual Disk Size" is a naked uint64_t which contains the size
# of the virtual disk, and is our ultimate target here.
#
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-vhdx/83e061f8-f6e2-4de1-91bd-5d518a43d477
class VHDXInspector(FileInspector):
"""MS VHDX Format
This requires some complex parsing of the stream. The first 256KiB
of the image is stored to get the header and region information,
and then we capture the first metadata region to read those
records, find the location of the virtual size data and parse
it. This needs to store the metadata table entries up until the
VDS record, which may consist of up to 2047 32-byte entries at
max. Finally, it must store a chunk of data at the offset of the
actual VDS uint64.
"""
METAREGION = '8B7CA206-4790-4B9A-B8FE-575F050F886E'
VIRTUAL_DISK_SIZE = '2FA54224-CD1B-4876-B211-5DBED83BF4B8'
VHDX_METADATA_TABLE_MAX_SIZE = 32 * 2048 # From qemu
    def __init__(self, *a, **k):
        super(VHDXInspector, self).__init__(*a, **k)
        # The first 32 bytes hold the 'vhdxfile' identity signature.
        self.new_region('ident', CaptureRegion(0, 32))
        # The 64KiB region table lives at fixed offset 0x30000 (192KiB).
        self.new_region('header', CaptureRegion(192 * 1024, 64 * 1024))
    def post_process(self):
        """Define follow-on capture regions once their offsets are known."""
        # After reading a chunk, we may have the following conditions:
        #
        # 1. We may have just completed the header region, and if so,
        #    we need to immediately read and calculate the location of
        #    the metadata region, as it may be starting in the same
        #    read we just did.
        # 2. We may have just completed the metadata region, and if so,
        #    we need to immediately calculate the location of the
        #    "virtual disk size" record, as it may be starting in the
        #    same read we just did.
        if self.region('header').complete and not self.has_region('metadata'):
            region = self._find_meta_region()
            if region:
                self.new_region('metadata', region)
        elif self.has_region('metadata') and not self.has_region('vds'):
            region = self._find_meta_entry(self.VIRTUAL_DISK_SIZE)
            if region:
                self.new_region('vds', region)
    @property
    def format_match(self):
        # The identity region must begin with the 'vhdxfile' signature.
        return self.region('ident').data.startswith(b'vhdxfile')
@staticmethod
def _guid(buf):
"""Format a MSFT GUID from the 16-byte input buffer."""
guid_format = '= 2048:
raise ImageFormatError('Region count is %i (limit 2047)' % count)
# Process the regions until we find the metadata one; grab the
# offset and return
self._log.debug('Region entry first is %x', region_entry_first)
self._log.debug('Region entries %i', count)
meta_offset = 0
for i in range(0, count):
entry_start = region_entry_first + (i * 32)
entry_end = entry_start + 32
entry = self.region('header').data[entry_start:entry_end]
self._log.debug('Entry offset is %x', entry_start)
# GUID is the first 16 bytes
guid = self._guid(entry[:16])
if guid == self.METAREGION:
# This entry is the metadata region entry
meta_offset, meta_len, meta_req = struct.unpack(
'= 2048:
raise ImageFormatError(
'Metadata item count is %i (limit 2047)' % count)
for i in range(0, count):
entry_offset = 32 + (i * 32)
guid = self._guid(meta_buffer[entry_offset:entry_offset + 16])
if guid == desired_guid:
# Found the item we are looking for by id.
# Stop our region from capturing
item_offset, item_length, _reserved = struct.unpack(
'